:gem: [Feature] Split to Requester and Streamer, and mock chat history with messages
networks/huggingchat_streamer.py (+48 -25)
```diff
@@ -21,23 +21,16 @@ from constants.headers import (
     HUGGINGCHAT_SETTINGS_POST_DATA,
 )
 from messagers.message_outputer import OpenaiStreamOutputer
+from messagers.message_composer import MessageComposer
 
 
-class HuggingchatStreamer:
+class HuggingchatRequester:
     def __init__(self, model: str):
         if model in MODEL_MAP.keys():
             self.model = model
         else:
             self.model = "mixtral-8x7b"
         self.model_fullname = MODEL_MAP[self.model]
-        self.message_outputer = OpenaiStreamOutputer(model=self.model)
-        # self.tokenizer = AutoTokenizer.from_pretrained(self.model_fullname)
-
-    # def count_tokens(self, text):
-    #     tokens = self.tokenizer.encode(text)
-    #     token_count = len(tokens)
-    #     logger.note(f"Prompt Token Count: {token_count}")
-    #     return token_count
 
     def get_hf_chat_id(self):
         request_url = "https://huggingface.co/chat/settings"
```
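Both classes resolve short model aliases through MODEL_MAP, imported from the repo's constants, with mixtral-8x7b as the fallback. Its values are full HuggingChat model names; a hypothetical sketch of its shape (the real entries live in the repo's constants module, not in this diff):

```python
# Hypothetical sketch of MODEL_MAP's shape; the actual entries are defined
# in the repo's constants module, not shown in this diff.
MODEL_MAP = {
    "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "command-r-plus": "CohereForAI/c4ai-command-r-plus",
}
```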
```diff
@@ -63,7 +56,7 @@ class HuggingchatStreamer:
             logger.warn(res.text)
             raise ValueError("Failed to get hf-chat ID!")
 
-    def get_conversation_id(self,
+    def get_conversation_id(self, system_prompt: str = ""):
         request_url = "https://huggingface.co/chat/conversation"
         request_headers = HUGGINGCHAT_POST_HEADERS
         extra_headers = {
@@ -72,7 +65,7 @@ class HuggingchatStreamer:
         request_headers.update(extra_headers)
         request_body = {
             "model": self.model_fullname,
-            "preprompt":
+            "preprompt": system_prompt,
         }
         logger.note(f"> Conversation ID:", end=" ")
 
```
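`get_conversation_id` now threads the system prompt into conversation creation via HuggingChat's `preprompt` field. Stripped of the repo's header constants, the call reduces to roughly the following sketch; the bare `requests.post` usage and the `conversationId` response field are assumptions (the diff only shows the ID being logged), and the real code also sends HUGGINGCHAT_POST_HEADERS plus the hf-chat cookie:

```python
import requests

def create_conversation(model_fullname: str, system_prompt: str = "") -> str:
    # Sketch only: "preprompt" carries the system prompt for the new
    # conversation; auth headers/cookies from the repo's constants omitted.
    res = requests.post(
        "https://huggingface.co/chat/conversation",
        json={"model": model_fullname, "preprompt": system_prompt},
    )
    # Assumed response shape, not confirmed by this diff.
    return res.json()["conversationId"]
```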
```diff
@@ -176,17 +169,14 @@ class HuggingchatStreamer:
 
         logger.exit_quiet(not verbose)
 
-    def
-        self
-
-
-
-
-        api_key: str = None,
-        use_cache: bool = False,
-    ):
+    def chat_completions(self, messages: list[dict], iter_lines=False, verbose=False):
+        composer = MessageComposer(model=self.model)
+        system_prompt, input_prompt = composer.decompose_to_system_and_input_prompt(
+            messages
+        )
+
         self.get_hf_chat_id()
-        self.get_conversation_id()
+        self.get_conversation_id(system_prompt=system_prompt)
         message_id = self.get_last_message_id()
 
         request_url = f"https://huggingface.co/chat/conversation/{self.conversation_id}"
```
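The new `chat_completions` leans on `MessageComposer.decompose_to_system_and_input_prompt`, which this diff does not show. A plausible sketch of such a decomposition (hypothetical, not the repo's actual implementation): pool system messages into one system prompt, and flatten the remaining turns into a single input prompt.

```python
def decompose_to_system_and_input_prompt(messages: list[dict]) -> tuple[str, str]:
    # Hypothetical sketch: gather system messages into one system prompt,
    # and flatten the remaining chat turns into a single input prompt.
    system_parts, input_parts = [], []
    for message in messages:
        content = message.get("content", "")
        if message.get("role") == "system":
            system_parts.append(content)
        else:
            input_parts.append(f"`{message.get('role', 'user')}`:\n{content}")
    return "\n".join(system_parts), "\n\n".join(input_parts)
```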
```diff
@@ -200,7 +190,7 @@ class HuggingchatStreamer:
         request_body = {
             "files": [],
             "id": message_id,
-            "inputs":
+            "inputs": input_prompt,
             "is_continue": False,
             "is_retry": False,
             "web_search": False,
@@ -214,9 +204,32 @@
             proxies=PROXIES,
             stream=True,
         )
-        self.log_response(res, stream=True, iter_lines=
+        self.log_response(res, stream=True, iter_lines=iter_lines, verbose=verbose)
         return res
 
+
+class HuggingchatStreamer:
+    def __init__(self, model: str):
+        if model in MODEL_MAP.keys():
+            self.model = model
+        else:
+            self.model = "mixtral-8x7b"
+        self.model_fullname = MODEL_MAP[self.model]
+        self.message_outputer = OpenaiStreamOutputer(model=self.model)
+        # self.tokenizer = AutoTokenizer.from_pretrained(self.model_fullname)
+
+    # def count_tokens(self, text):
+    #     tokens = self.tokenizer.encode(text)
+    #     token_count = len(tokens)
+    #     logger.note(f"Prompt Token Count: {token_count}")
+    #     return token_count
+
+    def chat_response(self, messages: list[dict], verbose=False):
+        requester = HuggingchatRequester(model=self.model)
+        return requester.chat_completions(
+            messages=messages, iter_lines=True, verbose=True
+        )
+
     def chat_return_dict(self, stream_response):
         pass
 
```
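`chat_completions` returns the raw streaming `requests` response, and the new `HuggingchatStreamer.chat_response` simply delegates to it, so a caller can iterate the body line by line. A minimal consumption sketch, assuming HuggingChat streams newline-delimited JSON events with `type` and `token` fields (the event schema is not confirmed by this diff):

```python
import json

def print_stream(res):
    # Assumed schema: each non-empty line is a JSON event, and "stream"
    # events carry incremental tokens.
    for line in res.iter_lines():
        if not line:
            continue
        event = json.loads(line.decode("utf-8"))
        if event.get("type") == "stream":
            print(event.get("token", ""), end="", flush=True)
```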
```diff
@@ -228,6 +241,16 @@ if __name__ == "__main__":
     # model = "llama3-70b"
     model = "command-r-plus"
     streamer = HuggingchatStreamer(model=model)
-
-
+
+    messages = [
+        {
+            "role": "system",
+            "content": "You are an LLM developed by CloseAI.\nYour name is Hansimov-Copilot.",
+        },
+        {"role": "user", "content": "Hello, what is your role?"},
+        {"role": "assistant", "content": "I am an LLM."},
+        {"role": "user", "content": "What is your name?"},
+    ]
+
+    streamer.chat_response(messages=messages)
     # HF_ENDPOINT=https://hf-mirror.com python -m networks.huggingchat_streamer
```