Update app.py
app.py CHANGED
@@ -20,7 +20,6 @@ Paying tribute to the ancient master Zhang Zhongjing, this model integrates clas
 Please note!!! This model should not be used for any medical purposes or scenarios potentially involving medical or health advice. It is currently still in the research and testing stage. We sincerely request your valuable feedback. Thank you.
 """
 
-
 LICENSE = """
 <p/>
 
@@ -38,11 +37,14 @@ if torch.cuda.is_available():
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 tokenizer.use_default_system_prompt = False
 
+# Define the system_prompt variable
+system_prompt = "You are a helpful TCM assistant named 仲景中医大语言模型, created by 医哲未来. You can switch between Chinese and English based on user preference."
+
 @spaces.GPU
 def generate(
     message: str,
     chat_history: list[tuple[str, str]],
-    system_prompt: str =
+    system_prompt: str = system_prompt,
     max_new_tokens: int = 1024,
     temperature: float = 0.6,
     top_p: float = 0.95,
@@ -83,7 +85,11 @@ def generate(
 chat_interface = gr.ChatInterface(
     fn=generate,
     additional_inputs=[
-        gr.Textbox(
+        gr.Textbox(
+            label="System prompt",
+            lines=6,
+            value=system_prompt,
+        ),
         gr.Slider(
             label="Max new tokens",
             minimum=1,
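For context on what this change wires together, here is a minimal standalone sketch, not the Space's actual code: the model call is replaced by a hypothetical echo stub and the Slider bounds are assumed. It shows how gr.ChatInterface forwards each component in additional_inputs to fn as an extra argument after (message, history), which is why the Textbox default and the module-level system_prompt variable are kept in sync.

import gradio as gr

# Module-level default, mirroring the diff above.
system_prompt = "You are a helpful TCM assistant named 仲景中医大语言模型, created by 医哲未来."

def generate(message, chat_history, system_prompt=system_prompt, max_new_tokens=1024):
    # Hypothetical stub: the real app builds a chat template and streams model output here.
    return f"(system prompt starts with {system_prompt[:20]!r}) You said: {message}"

chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        # Each component's current value is passed to generate() after (message, history),
        # in the order listed here.
        gr.Textbox(label="System prompt", lines=6, value=system_prompt),
        gr.Slider(label="Max new tokens", minimum=1, maximum=2048, step=1, value=1024),
    ],
)

if __name__ == "__main__":
    chat_interface.launch()

Because the Textbox is initialized with value=system_prompt, the UI field and the keyword default agree; Gradio always passes the Textbox's current contents when the interface runs, so the function default mainly matters when generate is called directly.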