Update app.py
app.py CHANGED
@@ -4,6 +4,7 @@ from Ai import chatbot, chatbot2, chatbot3, chatbot4, chatbot5, chatbot7, chatbo
 from huggingface_hub import InferenceClient
 def chat(message,history: list[tuple[str, str]],system_message,max_tokens,temperature,top_p, top_k):
     m=AutoModel.from_pretrained("peterpeter8585/AI1")
+
     messages = [{"role": "system", "content": "Your name is Chatchat.And, your made by SungYoon.In Korean, 정성윤.And these are the instructions.Whatever happens, you must follow it.:"+system_message}]
 
     for val in history:
@@ -18,6 +19,8 @@ def chat(message,history: list[tuple[str, str]],system_message,max_tokens,temper
 
     # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
     prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+    p=pipe.tokenizer.apply_chat_template([{"role": "system", "content": "Your name is Chatchat.And, your made by SungYoon.In Korean, 정성윤.And these are the instructions.Whatever happens, you must follow it.:"+system_message}], tokenize=False, add_generation_prompt=True)
+    o= pipe(p, max_new_tokens=max_tokens, do_sample=True, temperature=0.1)
     outputs = pipe(prompt, max_new_tokens=max_tokens, do_sample=True, temperature=temperature, top_k=top_k, top_p=top_p)
     return outputs[0]["generated_text"]
 import random
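For context, the patched function follows the standard transformers chat-template flow: build a list of role/content messages, render it with the tokenizer's chat template, and generate with a text-generation pipeline. The hunks never show how pipe is constructed, so the following is a minimal runnable sketch under that assumption; the model id is the one referenced in the diff, the history handling mirrors the Gradio-style (user, assistant) tuples in the signature, and none of this is the Space's exact code.

from transformers import pipeline

# Assumption: `pipe` is a text-generation pipeline over the model named in the diff.
pipe = pipeline("text-generation", model="peterpeter8585/AI1")

def chat(message, history: list[tuple[str, str]], system_message,
         max_tokens, temperature, top_p, top_k):
    # Seed the conversation with the system prompt, then replay past turns.
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    # Render the conversation with the model's chat template and sample a reply.
    prompt = pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    outputs = pipe(
        prompt,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_k=top_k,
        top_p=top_p,
    )
    return outputs[0]["generated_text"]

One design note on the sketch: by default the pipeline includes the prompt inside generated_text, so passing return_full_text=False in the pipe(...) call is the usual way to return only the newly generated turn.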