winglian committed on
Commit
b15e08e
1 Parent(s): 539af9d

Update app.py

Files changed (2)
  1. app.py +29 -18
  2. config.yml +1 -1
app.py CHANGED
@@ -1,3 +1,4 @@
+import logging
 import os
 import re
 from time import sleep
@@ -9,6 +10,8 @@ import yaml
 with open("./config.yml", "r") as f:
     config = yaml.load(f, Loader=yaml.Loader)
 
+logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO"))
+
 
 def make_prediction(prompt, max_tokens=None, temperature=None, top_p=None, top_k=None, repeat_penalty=None):
     input = config["llm"].copy()
@@ -40,13 +43,18 @@ def make_prediction(prompt, max_tokens=None, temperature=None, top_p=None, top_k=None, repeat_penalty=None):
 
 def poll_for_status(task_id):
     url = f"https://api.runpod.ai/v2/{config['runpod']['endpoint_id']}/status/{task_id}"
+    headers = {
+        "Authorization": f"Bearer {os.environ['RUNPOD_AI_API_KEY']}"
+    }
 
     while True:
-        response = requests.get(url)
+        response = requests.get(url, headers=headers)
         if response.status_code == 200:
             data = response.json()
             if data.get('status') == 'COMPLETED':
                 return data["output"]
+        elif response.status_code >= 400:
+            logging.error(response.json())
         # Sleep for 3 seconds between each request
         sleep(3)
 
@@ -58,11 +66,11 @@ def delay_typer(words, delay=0.8):
         sleep(delay)
 
 
-def user(message, history):
+def user(message, nudge_msg, history):
     history = history or []
     # Append the user's message to the conversation history
-    history.append([message, ""])
-    return "", history
+    history.append([message, nudge_msg])
+    return "", nudge_msg, history
 
 
 def chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
@@ -73,9 +81,8 @@ def chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
         for item in history])
 
     # remove last space from assistant, some models output a ZWSP if you leave a space
-    messages = messages[:-1]
+    messages = messages.rstrip()
 
-    history[-1][1] = ""
     prediction = make_prediction(
         messages,
         max_tokens=max_tokens,
@@ -87,9 +94,11 @@ def chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
     tokens = re.findall(r'\s*\S+\s*', prediction)
     for s in tokens:
         answer = s
+        print(history)
+        print(history[-1])
         history[-1][1] += answer
         # stream the response
-        yield history, history
+        yield history, history, ""
         sleep(config['typer']['delay'])
 
 
@@ -102,9 +111,8 @@ def rp_chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
         for item in history])
 
     # remove last space from assistant, some models output a ZWSP if you leave a space
-    messages = messages[:-1]
+    messages = messages.rstrip()
 
-    history[-1][1] = ""
     prediction = make_prediction(
         messages,
         max_tokens=max_tokens,
@@ -118,14 +126,15 @@ def rp_chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
         answer = s
         history[-1][1] += answer
         # stream the response
-        yield history, history
+        yield history, history, ""
         sleep(config['typer']['delay'])
 
 
-def clear_chat(chat_history_state, chat_message):
+def clear_chat(chat_history_state, chat_message, nudge_msg):
     chat_history_state = []
     chat_message = ''
-    return chat_history_state, chat_message
+    nudge_msg = ''
+    return chat_history_state, chat_message, nudge_msg
 
 
 start_message = """
@@ -146,7 +155,6 @@ with gr.Blocks() as demo:
     - [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-runpod-ui?duplicate=true) to skip the queue and run in a private space or to use your own GGML models. You will need to configure you own runpod serverless endpoint.
     - When using your own models, simply update the [config.yml](https://huggingface.co/spaces/openaccess-ai-collective/ggml-runpod-ui/blob/main/config.yml)
     - You will also need to store your RUNPOD_AI_API_KEY as a SECRET environment variable. DO NOT STORE THIS IN THE config.yml.
-    - Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui)
     - Many thanks to [TheBloke](https://huggingface.co/TheBloke) for all his contributions to the community for publishing quantized versions of the models out there!
     """)
     with gr.Tab("Chatbot"):
@@ -174,19 +182,22 @@ with gr.Blocks() as demo:
         system_msg = gr.Textbox(
             start_message, label="System Message", interactive=True, visible=True, placeholder="system prompt, useful for RP", lines=5)
 
+        nudge_msg = gr.Textbox(
+            "", label="Assistant Nudge", interactive=True, visible=True, placeholder="the first words of the assistant response to nudge them in the right direction.", lines=1)
+
         chat_history_state = gr.State()
-        clear.click(clear_chat, inputs=[chat_history_state, message], outputs=[chat_history_state, message], queue=False)
+        clear.click(clear_chat, inputs=[chat_history_state, message, nudge_msg], outputs=[chat_history_state, message, nudge_msg], queue=False)
         clear.click(lambda: None, None, chatbot, queue=False)
 
         submit_click_event = submit.click(
-            fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
+            fn=user, inputs=[message, nudge_msg, chat_history_state], outputs=[message, nudge_msg, chat_history_state], queue=True
         ).then(
-            fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True
+            fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state, message], queue=True
         )
         roleplay_click_event = roleplay.click(
-            fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
+            fn=user, inputs=[message, nudge_msg, chat_history_state], outputs=[message, nudge_msg, chat_history_state], queue=True
         ).then(
-            fn=rp_chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True
+            fn=rp_chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state, message], queue=True
         )
         stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event, roleplay_click_event], queue=False)
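The "Assistant Nudge" change is easiest to see in isolation: the updated user() seeds the assistant half of the newest history pair with the nudge text, and the streaming loops in chat()/rp_chat() then append generated tokens after it. A small standalone sketch (not part of the commit; plain Python lists stand in for the Gradio state):

def user(message, nudge_msg, history):
    # copied from the updated handler: the nudge becomes the start of the assistant reply
    history = history or []
    history.append([message, nudge_msg])
    return "", nudge_msg, history

_, _, history = user("Tell me a story.", "Once upon a time", None)
for token in [",", " there", " was"]:  # stand-ins for streamed tokens
    history[-1][1] += token
print(history[-1])  # ['Tell me a story.', 'Once upon a time, there was']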
config.yml CHANGED
@@ -3,7 +3,7 @@ model_url: https://huggingface.co/openaccess-ai-collective/manticore-13b-chat-py
 typer:
   delay: 0.1
 runpod:
-  endpoint_id: 4k4aom7l1xa64k
+  endpoint_id: jifr1oczbrmr3n
   prefer_async: true
 llm:
   top_k:
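For anyone duplicating the Space against their own endpoint, the new Authorization header and endpoint_id fit into a request/poll loop like the sketch below. It assumes RunPod's standard serverless /run and /status endpoints and that RUNPOD_AI_API_KEY is stored as a secret environment variable, as the in-app notes instruct; submit_job() and its payload shape are illustrative, since this commit only shows the status-polling side.

import os
from time import sleep

import requests

ENDPOINT_ID = "jifr1oczbrmr3n"  # the new value from config.yml
HEADERS = {"Authorization": f"Bearer {os.environ['RUNPOD_AI_API_KEY']}"}


def submit_job(prompt):
    # Assumed helper: queue an async job on the serverless endpoint and return its task id.
    resp = requests.post(
        f"https://api.runpod.ai/v2/{ENDPOINT_ID}/run",
        headers=HEADERS,
        json={"input": {"prompt": prompt}},
    )
    resp.raise_for_status()
    return resp.json()["id"]


def wait_for_output(task_id, poll_seconds=3):
    # Same shape as the updated poll_for_status(): authorized GETs against
    # /status until the job reports COMPLETED, surfacing error responses.
    url = f"https://api.runpod.ai/v2/{ENDPOINT_ID}/status/{task_id}"
    while True:
        resp = requests.get(url, headers=HEADERS)
        if resp.status_code == 200:
            data = resp.json()
            if data.get("status") == "COMPLETED":
                return data["output"]
        elif resp.status_code >= 400:
            print(resp.json())
        sleep(poll_seconds)


if __name__ == "__main__":
    print(wait_for_output(submit_job("Hello from ggml-runpod-ui")))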