CMLL committed
Commit
968b320
1 Parent(s): d71ad7e

Update app.py

Files changed (1)
  1. app.py +4 -5
app.py CHANGED
@@ -36,16 +36,14 @@ if torch.cuda.is_available():
 def generate(
     message: str,
     chat_history: list[tuple[str, str]],
-    system_prompt: str,
+    system_prompt: str = "You are a helpful TCM medical assistant named 仲景中医大语言模型, created by 医哲未来.",
     max_new_tokens: int = 1024,
     temperature: float = 0.6,
     top_p: float = 0.9,
     top_k: int = 50,
     repetition_penalty: float = 1.2,
 ) -> Iterator[str]:
-    conversation = []
-    if system_prompt:
-        conversation.append({"role": "system", "content": system_prompt})
+    conversation = [{"role": "system", "content": system_prompt}]
     for user, assistant in chat_history:
         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
     conversation.append({"role": "user", "content": message})
@@ -79,7 +77,7 @@ def generate(
 chat_interface = gr.ChatInterface(
     fn=generate,
     additional_inputs=[
-        gr.Textbox(label="System prompt", lines=6),
+        gr.Textbox(label="System prompt", lines=6, value="You are a helpful TCM medical assistant named 仲景中医大语言模型, created by 医哲未来."),
         gr.Slider(
             label="Max new tokens",
             minimum=1,
@@ -134,3 +132,4 @@ with gr.Blocks(css="style.css") as demo:
 
 if __name__ == "__main__":
     demo.queue(max_size=20).launch()
+
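
In effect, the commit bakes the TCM assistant persona into generate() as a default argument, mirrors it as the Textbox default in the Gradio UI, and always prepends the system message instead of adding it conditionally. A minimal sketch of the resulting conversation-building step is below; build_conversation is a hypothetical helper name used only for illustration (app.py does this inline), and the example history and message are made up, but the body mirrors the new lines in the diff.

    # Sketch of the post-commit conversation construction in generate().
    # build_conversation is a hypothetical name; app.py inlines this logic.
    DEFAULT_SYSTEM_PROMPT = "You are a helpful TCM medical assistant named 仲景中医大语言模型, created by 医哲未来."

    def build_conversation(
        message: str,
        chat_history: list[tuple[str, str]],
        system_prompt: str = DEFAULT_SYSTEM_PROMPT,
    ) -> list[dict[str, str]]:
        # The system message is now always prepended; before this commit it
        # was only added when a non-empty system_prompt was passed in.
        conversation = [{"role": "system", "content": system_prompt}]
        for user, assistant in chat_history:
            conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
        conversation.append({"role": "user", "content": message})
        return conversation

    # Hypothetical usage: one prior turn plus a new user message yields a
    # four-entry list: system, user, assistant, user.
    print(build_conversation("follow-up question", [("hello", "hi, how can I help?")]))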