Artin2009 committed on
Commit
2be7027
1 Parent(s): f32ea8f

Update chain_app.py

Browse files
Files changed (1) hide show
  1. chain_app.py +717 -716
chain_app.py CHANGED
@@ -1,717 +1,718 @@
1
import chainlit as cl
from gradio_client import Client
from openai import OpenAI
from groq import Groq
import requests
from chainlit.input_widget import Select, Slider
import os  # fixed: os.environ is used below but os was never imported

# API credentials come from the environment; a missing variable yields None,
# which the client constructors below will reject when first used.
hf_token = os.environ.get("HF_TOKEN")
openai_api_key = os.environ.get('OPENAI_API_KEY')
groq_api_key = os.environ.get('GROQ_API_KEY')

# Backend clients: a HF Space for the open text models, OpenAI, and Groq.
hf_text_client = Client("Artin2009/text-generation", hf_token=hf_token)
# hf_image_client = Client('Artin2009/image-generation', hf_token='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')
openai_client = OpenAI(api_key=openai_api_key)
groq_client = Groq(api_key=groq_api_key)

# Raw HF Inference API endpoint used by the TookaBERT fill-mask profile.
API_URL = "https://api-inference.huggingface.co/models/PartAI/TookaBERT-Large"
headers = {"Authorization": f"Bearer {hf_token}"}


def query(payload):
    """POST *payload* to the TookaBERT inference endpoint and return the decoded JSON."""
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
24
-
25
@cl.set_chat_profiles
async def chat_profile():
    """Declare the selectable chat profiles, one per backing model.

    Returns the list rendered in the Chainlit profile picker.  The two
    Llama-3 entries previously described the models as "Llama-2" (and the
    8B entry claimed 7B parameters); the descriptions now match the names.
    """
    return [
        cl.ChatProfile(
            name="None",
            markdown_description="None",
        ),
        cl.ChatProfile(
            name="neural-brain-AI",
            markdown_description="The main model of neural brain",
        ),
        cl.ChatProfile(
            name="Dorna-AI",
            markdown_description="One of the open-sourced models that neural brain team fine-tuned",
        ),
        cl.ChatProfile(
            name='Image-Generation',
            markdown_description='Our image generation model, has a performance like midjourney',
        ),
        cl.ChatProfile(
            name="GPT-4",
            markdown_description="OpenAI's GPT-4 model",
        ),
        cl.ChatProfile(
            name="gpt-3.5-turbo",
            markdown_description="OpenAI's GPT-3.5 Turbo model",
        ),
        cl.ChatProfile(
            name="GPT-3.5-turbo-0125",
            markdown_description="OpenAI's GPT-3.5 Turbo 0125 model",
        ),
        cl.ChatProfile(
            name="gpt-3.5-turbo-1106",
            markdown_description="OpenAI's GPT-3.5 Turbo 1106 model",
        ),
        cl.ChatProfile(
            name="davinci-002",
            markdown_description="OpenAI's Davinci-002 model",
        ),
        cl.ChatProfile(
            name="TTS",
            markdown_description="OpenAI's Text-to-Speech model",
        ),
        cl.ChatProfile(
            # fixed: was described as "Llama-2"
            name="Llama-3-70B",
            markdown_description="Meta Open Source model Llama-3 with 70B parameters",
        ),
        cl.ChatProfile(
            # fixed: was described as "Llama-2 with 7B parameters"
            name="Llama-3-8B",
            markdown_description="Meta Open Source model Llama-3 with 8B parameters",
        ),
        cl.ChatProfile(
            name="gemma-7B",
            markdown_description='Google Open Source LLM',
        ),
        cl.ChatProfile(
            name="zephyr-7B",
            markdown_description="Open Source model Zephyr with 7B parameters",
        ),
        cl.ChatProfile(
            name='mistral-7B',
            markdown_description='mistral open source LLM with 7B parameters',
        ),
        cl.ChatProfile(
            name="Toka-353M",
            markdown_description="PartAI Open Source model Toka with 353M parameters",
        ),
    ]
93
-
94
def _fixed_select(widget_id, label, value):
    """Build a Select widget exposing a single fixed (informational) choice."""
    return Select(id=widget_id, label=label, values=[value], initial_index=0)


def _temperature_slider():
    """Build the 0-1 model-temperature slider shared by every text-model profile."""
    return Slider(id="Temperature", label="Model Temperature",
                  initial=0.7, min=0, max=1, step=0.1)


# Profiles sharing the same settings layout (one model Select + temperature
# slider): profile name -> (select id, select label, fixed value, greeting).
# Greeting strings are reproduced byte-for-byte from the original handlers.
_STANDARD_PROFILE_SETTINGS = {
    'GPT-4': ("OpenAI-Model", "OpenAI - Model", "gpt-4",
              "Im OpenAI's latest and biggest model. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'gpt-3.5-turbo': ("OpenAI-Model", "OpenAI - Model", "gpt-3.5-turbo",
                      "Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'GPT-3.5-turbo-0125': ("OpenAI-Model", "OpenAI - Model", "gpt-3.5-turbo-0125",
                           "Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'gpt-3.5-turbo-1106': ("OpenAI-Model", "OpenAI - Model", "gpt-3.5-turbo-1106",
                           "Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'davinci-002': ("OpenAI-Model", "OpenAI - Model", "davinci-002",
                    "Im one of the OpenAI's models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'Llama-3-70B': ("Meta-Model", "Meta - Model", "Llama-3-70B",
                    "Im the big Llama!. one of the best open source models released by Meta! i am the Big version of meta's open source LLMs., i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'Llama-3-8B': ("Meta-Model", "Meta - Model", "Llama-3-8B",
                   "Im The small Llama!. one of the best open source models released by Meta! i am the small version of meta's open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'gemma-7B': ("Google-Model", "Google - Model", "Gemma-7B",
                 "Im Gemma. the small version of google open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'zephyr-7B': ("zephyr-Model", "zephyr - Model", "zephyr-7B",
                  "Im Zephyr. One of the best open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'mistral-7B': ("Mistral-Model", "Mistral - Model", "Mistral-7B",
                   "Im Mistral. the small version of Mistral Family. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'Toka-353M': ("PartAI-Model", "PartAI - Model", "TokaBert-353M",
                  "Im Toka. An opens source persian LLM . i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? you should ask me your questions like : the capital of england is <mask> "),
}


@cl.on_chat_start
async def on_chat_start():
    """Show the profile-specific settings panel and greeting when a chat opens.

    Profiles are mutually exclusive, so the original if-chain is folded into
    an elif chain plus a lookup table for the eleven identical layouts.
    """
    chat_profile = cl.user_session.get("chat_profile")

    if chat_profile == 'neural-brain-AI':
        await cl.ChatSettings(
            [_fixed_select("NB-Model", "NeuralBrain - Models", "Neural Brain AI")]
        ).send()
        await cl.Message(
            content="Hello, I am the main model of neural brain team, i am an instance of ChatGPT-4, This team finetuned me and i am ready to help you"
        ).send()

    elif chat_profile == 'Dorna-AI':
        # Dorna exposes the raw gradio call parameters as fixed selects.
        await cl.ChatSettings(
            [
                Select(id="param_3", label="Parameter 3", values=["512"],
                       initial_index=0, tooltip="Config parameter 3 (e.g., max tokens)"),
                Select(id="param_4", label="Parameter 4", values=["0.7"],
                       initial_index=0, tooltip="Config parameter 4 (e.g., temperature)"),
                Select(id="param_5", label="Parameter 5", values=["0.95"],
                       initial_index=0, tooltip="Config parameter 5 (e.g., top_p)"),
                Select(id="api_name", label="API Name", values=["/chat"], initial_index=0),
            ]
        ).send()
        await cl.Message(
            content='my name is Dorna, Your AI Assistant designed by neural nexus team. i was made by Artin Daneshvar and Sadra Noadoust, 2 iranian students!'
        ).send()

    elif chat_profile == 'Image-Generation':
        await cl.ChatSettings(
            [
                Slider(id="Image_Width", label="Image Width", initial=512,
                       min=256, max=2048, step=64, tooltip="Measured in pixels"),
                Slider(id="Image_Height", label="Image Height", initial=512,
                       min=256, max=2048, step=64, tooltip="Measured in pixels"),
            ]
        ).send()
        image = cl.Image(path='cat.png', name="result", display="inline")
        # NOTE: the user-visible "resoloution" typo is preserved deliberately.
        await cl.Message(
            content="I can make high quality & resoloution images for you, This is an example of what i can do!",
            elements=[image],
        ).send()

    elif chat_profile == 'TTS':
        # TTS has no tunable settings; only the greeting is sent.
        await cl.Message(
            content="Im TTS. of the best models OpenAI ever created. i can convert text to speech! . i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
        ).send()

    elif chat_profile in _STANDARD_PROFILE_SETTINGS:
        widget_id, label, value, greeting = _STANDARD_PROFILE_SETTINGS[chat_profile]
        await cl.ChatSettings(
            [_fixed_select(widget_id, label, value), _temperature_slider()]
        ).send()
        await cl.Message(content=greeting).send()
427
-
428
-
429
# System prompt shared by every OpenAI-backed profile (byte-identical to the
# string previously duplicated in six branches).
_NN_SYSTEM_PROMPT = "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"

# Chat profile -> OpenAI model id.  Model ids are lowercase: the previous code
# sent "GPT-3.5-turbo-0125", which the OpenAI API rejects as an unknown model.
_OPENAI_CHAT_MODELS = {
    'neural-brain-AI': "ft:gpt-3.5-turbo-1106:nb:aria1:9UWDrLJK",
    'GPT-4': "gpt-4",
    'gpt-3.5-turbo': "gpt-3.5-turbo",
    'GPT-3.5-turbo-0125': "gpt-3.5-turbo-0125",
    'gpt-3.5-turbo-1106': "gpt-3.5-turbo-1106",
    'davinci-002': "davinci-002",
}

# Chat profile -> Groq model id (all served through the same streaming call).
_GROQ_CHAT_MODELS = {
    'Llama-3-70B': "llama3-70b-8192",
    'Llama-3-8B': "llama3-8b-8192",
    'gemma-7B': "gemma-7b-it",
    'mistral-7B': "mixtral-8x7b-32768",
}


def _openai_reply(model, user_content):
    """Run a non-streaming OpenAI chat completion and return the reply text."""
    completion = openai_client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": _NN_SYSTEM_PROMPT},
            {"role": "user", "content": user_content},
        ],
    )
    return completion.choices[0].message.content


def _groq_reply(model, user_content):
    """Stream a Groq chat completion and return the concatenated reply text."""
    completion = groq_client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": user_content}],
        temperature=1,
        max_tokens=1024,
        top_p=1,
        stream=True,
        stop=None,
    )
    reply = ""
    for chunk in completion:
        piece = chunk.choices[0].delta.content
        if piece is not None:  # the final chunk carries no delta content
            reply += piece
    return reply


def _hf_space_reply(persona, user_content):
    """Query the HF text-generation Space with a persona-specific request string."""
    result = hf_text_client.predict(
        message=user_content,
        request=f"your name is {persona},An AI Assistant designed by neural nexus team. i was made by Artin Daneshvar and Sadra Noadoust, 2 iranian students!",
        param_3=512,
        param_4=0.7,
        param_5=0.95,
        api_name="/chat",
    )
    return result.strip("</s>")


@cl.on_message
async def main(message: cl.Message):
    """Dispatch the user's message to the backend chosen by the chat profile."""
    chat_profile = cl.user_session.get("chat_profile")
    if not chat_profile or chat_profile == 'None':
        await cl.Message(content="Please select a model first.").send()
        return

    if chat_profile in _OPENAI_CHAT_MODELS:
        reply = _openai_reply(_OPENAI_CHAT_MODELS[chat_profile], message.content)
        await cl.Message(content=reply).send()

    elif chat_profile == "Dorna-AI":
        await cl.Message(content=_hf_space_reply("Dorna", message.content)).send()

    elif chat_profile == "zephyr-7B":
        await cl.Message(content=_hf_space_reply("zephyr", message.content)).send()

    elif chat_profile == 'Image-Generation':
        # Fixed: ChatSettings takes a *list* of widgets — they were previously
        # passed as bare positional arguments, unlike every other call site.
        await cl.ChatSettings(
            [
                Slider(id="Image_Width", label="Image Width", initial=512,
                       min=256, max=2048, step=64, tooltip="Measured in pixels"),
                Slider(id="Image_Height", label="Image Height", initial=512,
                       min=256, max=2048, step=64, tooltip="Measured in pixels"),
            ]
        ).send()
        # NOTE(review): hf_image_client is commented out at module level, so
        # this call raises NameError as shipped — re-enable the client (with
        # its token moved to an env var) before exposing this profile.
        result = hf_image_client.predict(
            prompt=message.content,
            negative_prompt="",
            seed=0,
            randomize_seed=True,
            width=512,
            height=512,
            guidance_scale=0,
            num_inference_steps=2,
            api_name="/infer",
        )
        image = cl.Image(path=result, name="result", display="inline")
        await cl.Message(content="This message has an image!", elements=[image]).send()

    elif chat_profile == 'TTS':
        response = openai_client.audio.speech.create(
            model="tts-1",
            voice="alloy",
            input=message.content,
        )
        response.stream_to_file("output.mp3")
        elements = [cl.Audio(name="output.mp3", path="./output.mp3", display="inline")]
        await cl.Message(content="Here it is the response!", elements=elements).send()

    elif chat_profile in _GROQ_CHAT_MODELS:
        reply = _groq_reply(_GROQ_CHAT_MODELS[chat_profile], message.content)
        await cl.Message(content=reply).send()

    elif chat_profile == 'Toka-353M':
        # Fill-mask endpoint: each candidate dict carries the completed 'sequence'.
        output = query({"inputs": message.content})
        await cl.Message(content=output[0]['sequence']).send()
714
-
715
@cl.on_settings_update
async def setup_agent(settings):
    """Trace settings-panel changes to stdout.

    Nothing is persisted or applied yet; this is a debugging hook only.
    """
    print("on_settings_update", settings)
 
1
import os

import requests

import chainlit as cl
from chainlit.input_widget import Select, Slider
from gradio_client import Client
from groq import Groq
from openai import OpenAI

# Credentials come from the environment; a missing variable yields None.
hf_token = os.environ.get("HF_TOKEN")
openai_api_key = os.environ.get('OPENAI_API_KEY')
groq_api_key = os.environ.get('GROQ_API_KEY')

# Backend clients: a HF Space for the open text models, OpenAI, and Groq.
hf_text_client = Client("Artin2009/text-generation", hf_token=hf_token)
# hf_image_client = Client('Artin2009/image-generation', hf_token='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')
openai_client = OpenAI(api_key=openai_api_key)
groq_client = Groq(api_key=groq_api_key)

# Raw HF Inference API endpoint used by the TookaBERT fill-mask profile.
API_URL = "https://api-inference.huggingface.co/models/PartAI/TookaBERT-Large"
headers = {"Authorization": f"Bearer {hf_token}"}


def query(payload):
    """POST *payload* to the TookaBERT inference endpoint and return the decoded JSON."""
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
25
+
26
@cl.set_chat_profiles
async def chat_profile():
    """Declare the selectable chat profiles, one per backing model.

    Returns the list rendered in the Chainlit profile picker.  The two
    Llama-3 entries previously described the models as "Llama-2" (and the
    8B entry claimed 7B parameters); the descriptions now match the names.
    """
    return [
        cl.ChatProfile(
            name="None",
            markdown_description="None",
        ),
        cl.ChatProfile(
            name="neural-brain-AI",
            markdown_description="The main model of neural brain",
        ),
        cl.ChatProfile(
            name="Dorna-AI",
            markdown_description="One of the open-sourced models that neural brain team fine-tuned",
        ),
        cl.ChatProfile(
            name='Image-Generation',
            markdown_description='Our image generation model, has a performance like midjourney',
        ),
        cl.ChatProfile(
            name="GPT-4",
            markdown_description="OpenAI's GPT-4 model",
        ),
        cl.ChatProfile(
            name="gpt-3.5-turbo",
            markdown_description="OpenAI's GPT-3.5 Turbo model",
        ),
        cl.ChatProfile(
            name="GPT-3.5-turbo-0125",
            markdown_description="OpenAI's GPT-3.5 Turbo 0125 model",
        ),
        cl.ChatProfile(
            name="gpt-3.5-turbo-1106",
            markdown_description="OpenAI's GPT-3.5 Turbo 1106 model",
        ),
        cl.ChatProfile(
            name="davinci-002",
            markdown_description="OpenAI's Davinci-002 model",
        ),
        cl.ChatProfile(
            name="TTS",
            markdown_description="OpenAI's Text-to-Speech model",
        ),
        cl.ChatProfile(
            # fixed: was described as "Llama-2"
            name="Llama-3-70B",
            markdown_description="Meta Open Source model Llama-3 with 70B parameters",
        ),
        cl.ChatProfile(
            # fixed: was described as "Llama-2 with 7B parameters"
            name="Llama-3-8B",
            markdown_description="Meta Open Source model Llama-3 with 8B parameters",
        ),
        cl.ChatProfile(
            name="gemma-7B",
            markdown_description='Google Open Source LLM',
        ),
        cl.ChatProfile(
            name="zephyr-7B",
            markdown_description="Open Source model Zephyr with 7B parameters",
        ),
        cl.ChatProfile(
            name='mistral-7B',
            markdown_description='mistral open source LLM with 7B parameters',
        ),
        cl.ChatProfile(
            name="Toka-353M",
            markdown_description="PartAI Open Source model Toka with 353M parameters",
        ),
    ]
94
+
95
def _fixed_select(widget_id, label, value):
    """Build a Select widget exposing a single fixed (informational) choice."""
    return Select(id=widget_id, label=label, values=[value], initial_index=0)


def _temperature_slider():
    """Build the 0-1 model-temperature slider shared by every text-model profile."""
    return Slider(id="Temperature", label="Model Temperature",
                  initial=0.7, min=0, max=1, step=0.1)


# Profiles sharing the same settings layout (one model Select + temperature
# slider): profile name -> (select id, select label, fixed value, greeting).
# Greeting strings are reproduced byte-for-byte from the original handlers.
_STANDARD_PROFILE_SETTINGS = {
    'GPT-4': ("OpenAI-Model", "OpenAI - Model", "gpt-4",
              "Im OpenAI's latest and biggest model. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'gpt-3.5-turbo': ("OpenAI-Model", "OpenAI - Model", "gpt-3.5-turbo",
                      "Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'GPT-3.5-turbo-0125': ("OpenAI-Model", "OpenAI - Model", "gpt-3.5-turbo-0125",
                           "Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'gpt-3.5-turbo-1106': ("OpenAI-Model", "OpenAI - Model", "gpt-3.5-turbo-1106",
                           "Im one of the OpenAI's models. one of the best models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'davinci-002': ("OpenAI-Model", "OpenAI - Model", "davinci-002",
                    "Im one of the OpenAI's models. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'Llama-3-70B': ("Meta-Model", "Meta - Model", "Llama-3-70B",
                    "Im the big Llama!. one of the best open source models released by Meta! i am the Big version of meta's open source LLMs., i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'Llama-3-8B': ("Meta-Model", "Meta - Model", "Llama-3-8B",
                   "Im The small Llama!. one of the best open source models released by Meta! i am the small version of meta's open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'gemma-7B': ("Google-Model", "Google - Model", "Gemma-7B",
                 "Im Gemma. the small version of google open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'zephyr-7B': ("zephyr-Model", "zephyr - Model", "zephyr-7B",
                  "Im Zephyr. One of the best open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'mistral-7B': ("Mistral-Model", "Mistral - Model", "Mistral-7B",
                   "Im Mistral. the small version of Mistral Family. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "),
    'Toka-353M': ("PartAI-Model", "PartAI - Model", "TokaBert-353M",
                  "Im Toka. An opens source persian LLM . i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? you should ask me your questions like : the capital of england is <mask> "),
}


@cl.on_chat_start
async def on_chat_start():
    """Show the profile-specific settings panel and greeting when a chat opens.

    Profiles are mutually exclusive, so the original if-chain is folded into
    an elif chain plus a lookup table for the eleven identical layouts.
    """
    chat_profile = cl.user_session.get("chat_profile")

    if chat_profile == 'neural-brain-AI':
        await cl.ChatSettings(
            [_fixed_select("NB-Model", "NeuralBrain - Models", "Neural Brain AI")]
        ).send()
        await cl.Message(
            content="Hello, I am the main model of neural brain team, i am an instance of ChatGPT-4, This team finetuned me and i am ready to help you"
        ).send()

    elif chat_profile == 'Dorna-AI':
        # Dorna exposes the raw gradio call parameters as fixed selects.
        await cl.ChatSettings(
            [
                Select(id="param_3", label="Parameter 3", values=["512"],
                       initial_index=0, tooltip="Config parameter 3 (e.g., max tokens)"),
                Select(id="param_4", label="Parameter 4", values=["0.7"],
                       initial_index=0, tooltip="Config parameter 4 (e.g., temperature)"),
                Select(id="param_5", label="Parameter 5", values=["0.95"],
                       initial_index=0, tooltip="Config parameter 5 (e.g., top_p)"),
                Select(id="api_name", label="API Name", values=["/chat"], initial_index=0),
            ]
        ).send()
        await cl.Message(
            content='my name is Dorna, Your AI Assistant designed by neural nexus team. i was made by Artin Daneshvar and Sadra Noadoust, 2 iranian students!'
        ).send()

    elif chat_profile == 'Image-Generation':
        await cl.ChatSettings(
            [
                Slider(id="Image_Width", label="Image Width", initial=512,
                       min=256, max=2048, step=64, tooltip="Measured in pixels"),
                Slider(id="Image_Height", label="Image Height", initial=512,
                       min=256, max=2048, step=64, tooltip="Measured in pixels"),
            ]
        ).send()
        image = cl.Image(path='cat.png', name="result", display="inline")
        # NOTE: the user-visible "resoloution" typo is preserved deliberately.
        await cl.Message(
            content="I can make high quality & resoloution images for you, This is an example of what i can do!",
            elements=[image],
        ).send()

    elif chat_profile == 'TTS':
        # TTS has no tunable settings; only the greeting is sent.
        await cl.Message(
            content="Im TTS. of the best models OpenAI ever created. i can convert text to speech! . i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
        ).send()

    elif chat_profile in _STANDARD_PROFILE_SETTINGS:
        widget_id, label, value, greeting = _STANDARD_PROFILE_SETTINGS[chat_profile]
        await cl.ChatSettings(
            [_fixed_select(widget_id, label, value), _temperature_slider()]
        ).send()
        await cl.Message(content=greeting).send()
428
+
429
+
430
# Shared system prompt used by every OpenAI-backed chat profile.
_NN_SYSTEM_PROMPT = (
    "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"
)


async def _send_openai_chat(model: str, user_content: str) -> None:
    """Run a non-streaming OpenAI chat completion and send the reply to the UI."""
    completion = openai_client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": _NN_SYSTEM_PROMPT},
            {"role": "user", "content": user_content},
        ],
    )
    await cl.Message(content=completion.choices[0].message.content).send()


async def _send_groq_stream(model: str, user_content: str) -> None:
    """Stream a Groq chat completion, accumulate the chunks, send one message."""
    completion = groq_client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": user_content}],
        temperature=1,
        max_tokens=1024,
        top_p=1,
        stream=True,
        stop=None,
    )
    complete_content = ""
    for chunk in completion:
        # delta.content is None on role/stop chunks; skip those.
        content = chunk.choices[0].delta.content
        if content is not None:
            complete_content += content
    await cl.Message(content=complete_content).send()


async def _send_hf_space_chat(user_content: str, persona_request: str) -> None:
    """Query the Hugging Face text-generation Space and send the cleaned reply.

    ``persona_request`` is the system-style instruction forwarded to the Space.
    """
    result = hf_text_client.predict(
        message=user_content,
        request=persona_request,
        param_3=512,
        param_4=0.7,
        param_5=0.95,
        api_name="/chat",
    )
    # NOTE(review): str.strip("</s>") strips any of the characters <, /, s, >
    # from both ends, not just a trailing "</s>" token — kept as-is to preserve
    # existing behavior; consider str.removesuffix("</s>") instead.
    await cl.Message(content=result.strip("</s>")).send()


@cl.on_message
async def main(message: cl.Message):
    """Route an incoming user message to the model chosen via the chat profile.

    Dispatches on the session's ``chat_profile`` to one of:
    the fine-tuned neural-brain model, Hugging Face Space chat models
    (Dorna / zephyr), image generation, OpenAI chat models, OpenAI TTS,
    Groq-hosted open models (Llama-3 / gemma / mixtral), or the TookaBERT
    fill-mask endpoint.
    """
    chat_profile = cl.user_session.get("chat_profile")
    if not chat_profile or chat_profile == 'None':
        await cl.Message(content="Please select a model first.").send()
        return

    if chat_profile == 'neural-brain-AI':
        await _send_openai_chat("ft:gpt-3.5-turbo-1106:nb:aria1:9UWDrLJK", message.content)

    elif chat_profile == "Dorna-AI":
        await _send_hf_space_chat(
            message.content,
            "your name is Dorna,An AI Assistant designed by neural nexus team. i was made by Artin Daneshvar and Sadra Noadoust, 2 iranian students!",
        )

    elif chat_profile == 'Image-Generation':
        # BUG FIX: cl.ChatSettings expects a single list of input widgets;
        # the widgets were previously passed as two positional arguments.
        # (The slider values are not yet wired into the predict() call below.)
        await cl.ChatSettings(
            [
                Slider(
                    id="Image_Width",
                    label="Image Width",
                    initial=512,
                    min=256,
                    max=2048,
                    step=64,
                    tooltip="Measured in pixels",
                ),
                Slider(
                    id="Image_Height",
                    label="Image Height",
                    initial=512,
                    min=256,
                    max=2048,
                    step=64,
                    tooltip="Measured in pixels",
                ),
            ]
        ).send()
        # NOTE(review): hf_image_client is commented out at the top of this
        # file, so this branch raises NameError until it is re-enabled — confirm.
        result = hf_image_client.predict(
            prompt=message.content,
            negative_prompt="",
            seed=0,
            randomize_seed=True,
            width=512,
            height=512,
            guidance_scale=0,
            num_inference_steps=2,
            api_name="/infer",
        )
        image = cl.Image(path=result, name="result", display="inline")
        await cl.Message(
            content="This message has an image!",
            elements=[image],
        ).send()

    elif chat_profile == 'GPT-4':
        await _send_openai_chat("gpt-4", message.content)

    elif chat_profile == 'gpt-3.5-turbo':
        await _send_openai_chat("gpt-3.5-turbo", message.content)

    elif chat_profile == 'GPT-3.5-turbo-0125':
        # BUG FIX: OpenAI model ids are lowercase; "GPT-3.5-turbo-0125" is
        # rejected by the API with a model-not-found error.
        await _send_openai_chat("gpt-3.5-turbo-0125", message.content)

    elif chat_profile == 'gpt-3.5-turbo-1106':
        await _send_openai_chat("gpt-3.5-turbo-1106", message.content)

    elif chat_profile == 'davinci-002':
        # NOTE(review): davinci-002 is a legacy *completions* model; the Chat
        # Completions endpoint may reject it — confirm against the OpenAI docs.
        await _send_openai_chat("davinci-002", message.content)

    elif chat_profile == 'TTS':
        response = openai_client.audio.speech.create(
            model="tts-1",
            voice="alloy",
            input=message.content,
        )
        # Persist the synthesized speech locally, then attach it inline.
        response.stream_to_file("output.mp3")
        elements = [
            cl.Audio(name="output.mp3", path="./output.mp3", display="inline"),
        ]
        await cl.Message(
            content="Here it is the response!",
            elements=elements,
        ).send()

    elif chat_profile == 'Llama-3-70B':
        await _send_groq_stream("llama3-70b-8192", message.content)

    elif chat_profile == 'Llama-3-8B':
        await _send_groq_stream("llama3-8b-8192", message.content)

    elif chat_profile == 'gemma-7B':
        await _send_groq_stream("gemma-7b-it", message.content)

    elif chat_profile == "zephyr-7B":
        await _send_hf_space_chat(
            message.content,
            "your name is zephyr,An AI Assistant designed by neural nexus team. i was made by Artin Daneshvar and Sadra Noadoust, 2 iranian students!",
        )

    elif chat_profile == 'mistral-7B':
        await _send_groq_stream("mixtral-8x7b-32768", message.content)

    elif chat_profile == 'Toka-353M':
        # TookaBERT is a fill-mask model: the HF Inference API returns a list
        # of candidate fills, each carrying the completed 'sequence' text.
        output = query({
            "inputs": message.content,
        })
        await cl.Message(content=output[0]['sequence']).send()

+ @cl.on_settings_update
717
+ async def setup_agent(settings):
718
  print("on_settings_update", settings)