uhygfd committed on
Commit
341c31a
·
verified ·
1 Parent(s): 83dc5a3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -11
app.py CHANGED
@@ -1,15 +1,37 @@
1
  from huggingface_hub import InferenceClient
2
  import gradio as gr
 
 
3
 
4
# Anonymous inference client for the Mixtral 8x7B instruct model.
_MODEL_ID = "mistralai/Mixtral-8x7B-Instruct-v0.1"
client = InferenceClient(_MODEL_ID)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
def format_prompt(message, history):
    """Serialize the chat *history* plus the new *message* into a single
    Mixtral-style prompt string using [INST] ... [/INST] instruction tags."""
    pieces = ["<s>"]
    for user_turn, bot_turn in history:
        # Each past exchange: the user's instruction, then the bot's reply
        # closed with </s>.
        pieces.append(f"[INST] {user_turn} [/INST] {bot_turn}</s> ")
    # The current message becomes the final, unanswered instruction.
    pieces.append(f"[INST] {message} [/INST]")
    return "".join(pieces)
 
 
 
13
 
14
  def generate(
15
  prompt, history, temperature=0.2, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
@@ -38,15 +60,17 @@ def generate(
38
  yield output
39
  return output
40
 
41
-
42
# Chatbot widget: custom avatars, compact bubbles, copy + like buttons.
mychatbot = gr.Chatbot(
    avatar_images=["./user.png", "./botm.png"],
    bubble_full_width=False,
    show_label=False,
    show_copy_button=True,
    likeable=True,
)

# Wire the streaming generator into a chat UI; retry/undo are disabled.
demo = gr.ChatInterface(
    fn=generate,
    chatbot=mychatbot,
    title="Tomoniai's Mixtral 8x7b Chat",
    retry_btn=None,
    undo_btn=None,
)

# Queue requests so streamed responses work; hide the auto-generated API page.
demo.queue().launch(show_api=False)
 
 
1
from huggingface_hub import InferenceClient
import gradio as gr
import random
import os

# Read up to eight API tokens from the environment.
# BUG FIX: the original assignments each ended with a trailing comma, which
# made every hugging_tokenN a 1-tuple ('...',) instead of a string, so
# random.choice() returned a tuple and InferenceClient received an invalid
# token value.
hugging_token1 = os.getenv('hugging_token1')
hugging_token2 = os.getenv('hugging_token2')
hugging_token3 = os.getenv('hugging_token3')
hugging_token4 = os.getenv('hugging_token4')
hugging_token5 = os.getenv('hugging_token5')
hugging_token6 = os.getenv('hugging_token6')
hugging_token7 = os.getenv('hugging_token7')
hugging_token8 = os.getenv('hugging_token8')

hugging_tokens = [
    hugging_token1, hugging_token2,
    hugging_token3, hugging_token4,
    hugging_token5, hugging_token6,
    hugging_token7, hugging_token8,
]

# os.getenv returns None for unset variables; only choose among configured
# tokens, and fall back to anonymous access (token=None) when none are set.
_configured_tokens = [t for t in hugging_tokens if t]
API_TOKEN = random.choice(_configured_tokens) if _configured_tokens else None

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1", token=API_TOKEN)
24
 
25
def format_prompt(message, history):
    """Build a Mixtral-style instruction prompt from the chat history.

    Every past (user, bot) turn is wrapped in [INST] ... [/INST] tags with
    the bot reply closed by </s>; the current *message* is prefixed with a
    fixed preface string before being appended as the final instruction.
    """
    # Preface prepended to every new user request
    # (Russian: "Please answer like a real expert.").
    preface_text = "Пожалуйста, ответь как настоящий эксперт."
    parts = ["<s>"]
    for user_prompt, bot_response in history:
        parts.append(f"[INST] {user_prompt} [/INST]")
        parts.append(f" {bot_response}</s> ")
    # Inject the preface ahead of the current message.
    parts.append(f"[INST] {preface_text} {message} [/INST]")
    return "".join(parts)
35
 
36
  def generate(
37
  prompt, history, temperature=0.2, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
 
60
  yield output
61
  return output
62
 
63
# Configure the chatbot widget: custom avatars, compact bubbles,
# copy + like buttons, no label.
mychatbot = gr.Chatbot(
    avatar_images=["./user.png", "./bot.png"],
    bubble_full_width=False,
    show_label=False,
    show_copy_button=True,
    likeable=True,
)

# Build the chat interface around the streaming generator;
# retry/undo buttons are disabled.
demo = gr.ChatInterface(
    fn=generate,
    chatbot=mychatbot,
    title="🤬НЕАДЕКВАТ🤬",
    retry_btn=None,
    undo_btn=None,
)

# Launch the chatbot demo with request queuing; hide the auto-generated API page.
demo.queue().launch(show_api=False)