drewvid committed on
Commit
bbd4261
·
1 Parent(s): 4897852

first commit

Browse files
Files changed (2) hide show
  1. Makefile +8 -0
  2. app.py +31 -9
Makefile ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+
2
+ all:
3
+
4
+ update:
5
+ git add . --all
6
+ git commit -a
7
+ git push
8
+
app.py CHANGED
@@ -4,17 +4,37 @@ from huggingface_hub import InferenceClient
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
8
 
9
 
10
  def respond(
11
  message,
12
  history: list[tuple[str, str]],
13
- system_message,
14
  max_tokens,
15
  temperature,
16
  top_p,
17
  ):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  messages = [{"role": "system", "content": system_message}]
19
 
20
  for val in history:
@@ -34,10 +54,13 @@ def respond(
34
  temperature=temperature,
35
  top_p=top_p,
36
  ):
37
- token = message.choices[0].delta.content
38
-
39
- response += token
40
- yield response
 
 
 
41
 
42
  """
43
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
@@ -45,8 +68,7 @@ For information on how to customize the ChatInterface, peruse the gradio docs: h
45
  demo = gr.ChatInterface(
46
  respond,
47
  additional_inputs=[
48
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
49
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
50
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
51
  gr.Slider(
52
  minimum=0.1,
@@ -60,4 +82,4 @@ demo = gr.ChatInterface(
60
 
61
 
62
  if __name__ == "__main__":
63
- demo.launch()
 
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
7
+
8
+ # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
9
+ client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
10
 
11
 
12
  def respond(
13
  message,
14
  history: list[tuple[str, str]],
 
15
  max_tokens,
16
  temperature,
17
  top_p,
18
  ):
19
+
20
+ name = "Ernest"
21
+ system_message = f"""As a virtual mentor in cybersecurity called {name}, your role is to provide expert guidance and advice on protecting information and systems from cyber threats. You are an expert in:
22
+
23
+ 1) Information Security;
24
+ 2) Network Security;
25
+ 3) Application Security;
26
+ 4) Endpoint Security;
27
+ 5) Data Security;
28
+ 6) Identity and Access Management;
29
+ 7) Database and Infrastructure Security;
30
+ 8) Cloud Security;
31
+ 9) Disaster Recovery/Business Continuity Planning;
32
+ 10) Cyber Threat Intelligence;
33
+ 11) Legal, Regulations, Compliance, and Ethics;
34
+ 12) Operational Security (OpSec).
35
+
36
+ Your responses should be informed by current best practices in security protocols, risk management, and ethical hacking. Encourage a proactive security mindset, emphasizing the importance of continual learning, vigilance, and adaptation to new challenges in the cyber landscape. Offer clear, detailed explanations on complex topics such as network security, encryption, and compliance standards. Foster a responsible attitude towards data privacy and the ethical implications of cybersecurity measures. Your language should be precise and authoritative, suitable for educating both beginners and experienced professionals in the field."""
37
+
38
  messages = [{"role": "system", "content": system_message}]
39
 
40
  for val in history:
 
54
  temperature=temperature,
55
  top_p=top_p,
56
  ):
57
+ if message.choices:
58
+ token = message.choices[0].delta.content
59
+ if token:
60
+ response += token
61
+ yield response
62
+ else:
63
+ yield "Please clear the history and try again."
64
 
65
  """
66
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 
68
  demo = gr.ChatInterface(
69
  respond,
70
  additional_inputs=[
71
+ gr.Slider(minimum=1, maximum=8192, value=6144, step=1, label="Max new tokens"),
 
72
  gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
73
  gr.Slider(
74
  minimum=0.1,
 
82
 
83
 
84
  if __name__ == "__main__":
85
+ demo.launch()