lliu01 committed on
Commit
a8ba0ae
·
verified ·
1 Parent(s): 18b1d31

Change model

Browse files
Files changed (1)
  1. app.py +13 -13
app.py CHANGED
@@ -60,10 +60,10 @@ def predict(message, history, system_prompt, temperature, max_tokens):
60
 
61
  if __name__ == "__main__":
62
  args = parse_args()
63
- tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-2-12b-chat")
64
- tokenizer = AutoTokenizer.from_pretrained('stabilityai/stablelm-2-12b-chat')
65
  model = AutoModelForCausalLM.from_pretrained(
66
- 'stabilityai/stablelm-2-12b-chat',
67
  torch_dtype=torch.bfloat16,
68
  low_cpu_mem_usage=True
69
  )
@@ -71,8 +71,8 @@ if __name__ == "__main__":
71
  model = model.to(device)
72
  gr.ChatInterface(
73
  predict,
74
- title="StableLM 2 12B Chat - Demo",
75
- description="StableLM 2 12B Chat - StabilityAI",
76
  theme="soft",
77
  chatbot=gr.Chatbot(label="Chat History",),
78
  textbox=gr.Textbox(placeholder="input", container=False, scale=7),
@@ -80,18 +80,18 @@ if __name__ == "__main__":
80
  undo_btn="Delete Previous",
81
  clear_btn="Clear",
82
  additional_inputs=[
83
- gr.Textbox("You are a helpful assistant.", label="System Prompt"),
84
  gr.Slider(0, 1, 0.5, label="Temperature"),
85
  gr.Slider(100, 2048, 1024, label="Max Tokens"),
86
  ],
87
  examples=[
88
- ["What's been the role of music in human societies?"],
89
- ["Escribe un poema corto sobre la historia del Mediterráneo."],
90
- ["Scrivi un Haiku che celebri il gelato."],
91
- ["Schreibe ein Haiku über die Alpen."],
92
- ["Ecris une prose a propos de la mer du Nord."],
93
- ["Escreva um poema sobre a saudade."],
94
- ["Jane has 8 apples, out of which 2 are red and 3 are green. Assuming there are only red, green and white apples, how many of them are white? Solve this in Python."],
95
  ],
96
  additional_inputs_accordion_name="Parameters",
97
  ).queue().launch()
 
60
 
61
  if __name__ == "__main__":
62
  args = parse_args()
63
+ tokenizer = AutoTokenizer.from_pretrained("lliu01/fortios_cli")
64
+ tokenizer = AutoTokenizer.from_pretrained("lliu01/fortios_cli")
65
  model = AutoModelForCausalLM.from_pretrained(
66
+ "lliu01/fortios_cli",
67
  torch_dtype=torch.bfloat16,
68
  low_cpu_mem_usage=True
69
  )
 
71
  model = model.to(device)
72
  gr.ChatInterface(
73
  predict,
74
+ title="FortiOS CLI Chat - Demo",
75
+ description="FortiOS CLI Chat",
76
  theme="soft",
77
  chatbot=gr.Chatbot(label="Chat History",),
78
  textbox=gr.Textbox(placeholder="input", container=False, scale=7),
 
80
  undo_btn="Delete Previous",
81
  clear_btn="Clear",
82
  additional_inputs=[
83
+ gr.Textbox("FortiOS firewall policy configuration.", label="System Prompt"),
84
  gr.Slider(0, 1, 0.5, label="Temperature"),
85
  gr.Slider(100, 2048, 1024, label="Max Tokens"),
86
  ],
87
  examples=[
88
+ ["How can you move a policy by policy ID?"],
89
+ ["What is the command to enable security profiles in a firewall policy?"],
90
+ ["How do you configure a service group in the GUI?"],
91
+ ["How can you configure the firewall policy change summary in the CLI?"],
92
+ ["How do you disable hardware acceleration for an IPv4 firewall policy in the CLI?"],
93
+ ["How can you enable WAN optimization in a firewall policy using the CLI?"],
94
+ ["What are services in FortiOS and how are they used in firewall policies?"],
95
  ],
96
  additional_inputs_accordion_name="Parameters",
97
  ).queue().launch()