Spaces:
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -42,7 +42,7 @@ if torch.cuda.is_available():
|
|
42 |
def generate(
|
43 |
message: str,
|
44 |
chat_history: list[tuple[str, str]],
|
45 |
-
system_prompt: str = "You are a helpful TCM
|
46 |
max_new_tokens: int = 1024,
|
47 |
temperature: float = 0.6,
|
48 |
top_p: float = 0.9,
|
@@ -83,7 +83,7 @@ def generate(
|
|
83 |
chat_interface = gr.ChatInterface(
|
84 |
fn=generate,
|
85 |
additional_inputs=[
|
86 |
-
gr.Textbox(label="System prompt", lines=6, value="You are a helpful TCM
|
87 |
gr.Slider(
|
88 |
label="Max new tokens",
|
89 |
minimum=1,
|
|
|
42 |
def generate(
|
43 |
message: str,
|
44 |
chat_history: list[tuple[str, str]],
|
45 |
+
system_prompt: str = "You are a helpful TCM assistant named 仲景中医大语言模型, created by 医哲未来. You can switch between Chinese and English based on user preference.",
|
46 |
max_new_tokens: int = 1024,
|
47 |
temperature: float = 0.6,
|
48 |
top_p: float = 0.9,
|
|
|
83 |
chat_interface = gr.ChatInterface(
|
84 |
fn=generate,
|
85 |
additional_inputs=[
|
86 |
+
gr.Textbox(label="System prompt", lines=6, value="You are a helpful TCM assistant named 仲景中医大语言模型, created by 医哲未来. You can switch between Chinese and English based on user preference."),
|
87 |
gr.Slider(
|
88 |
label="Max new tokens",
|
89 |
minimum=1,
|