# drewvid's picture
# reversed change
# 6926c36
import gradio as gr
from huggingface_hub import InferenceClient
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# Model served through the Hugging Face Inference API.
# (Previously used: "HuggingFaceH4/zephyr-7b-beta".)
MODEL_ID = "meta-llama/Meta-Llama-3-8B-Instruct"
client = InferenceClient(MODEL_ID)
def respond(
    message,
    history: list[tuple[str, str]],
    max_tokens,
    temperature,
    top_p,
):
    """Stream an assistant reply for *message* given the prior chat *history*.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[tuple[str, str]]
        Prior (user, assistant) turns; empty strings in a turn are skipped.
    max_tokens : int
        Maximum number of new tokens to generate.
    temperature : float
        Sampling temperature forwarded to the endpoint.
    top_p : float
        Nucleus-sampling threshold forwarded to the endpoint.

    Yields
    ------
    str
        The accumulated response text so far (Gradio streaming convention:
        each yield replaces the displayed message).
    """
    name = "Ernest"
    system_message = f"""As a virtual mentor in cybersecurity called {name}, your role is to provide expert guidance and advice on protecting information and systems from cyber threats. You are an expert in:
1) Information Security;
2) Network Security;
3) Application Security;
4) Endpoint Security;
5) Data Security;
6) Identity and Access Management;
7) Database and Infrastructure Security;
8) Cloud Security;
9) Disaster Recovery/Business Continuity Planning;
10) Cyber Threat Intelligence;
11) Legal, Regulations, Compliance, and Ethics;
12) Operational Security (OpSec).
Your responses should be informed by current best practices in security protocols, risk management, and ethical hacking. Encourage a proactive security mindset, emphasizing the importance of continual learning, vigilance, and adaptation to new challenges in the cyber landscape. Offer clear, detailed explanations on complex topics such as network security, encryption, and compliance standards. Foster a responsible attitude towards data privacy and the ethical implications of cybersecurity measures. Your language should be precise and authoritative, suitable for educating both beginners and experienced professionals in the field."""

    # Rebuild the OpenAI-style transcript, skipping empty half-turns.
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    # BUGFIX: the loop variable is named `chunk` instead of `message`;
    # the original shadowed (and clobbered) the `message` parameter
    # while streaming.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        if chunk.choices:
            # Delta content can be None/empty on role-only chunks.
            token = chunk.choices[0].delta.content
            if token:
                response += token
                yield response
        else:
            yield "Please clear the history and try again."
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
# UI controls exposed as extra inputs to `respond`; their order must match
# the order of respond's trailing parameters.
_max_tokens_slider = gr.Slider(
    minimum=1, maximum=4096, value=2048, step=1, label="Max new tokens"
)
_temperature_slider = gr.Slider(
    minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"
)
_top_p_slider = gr.Slider(
    minimum=0.1,
    maximum=1.0,
    value=0.95,
    step=0.05,
    label="Top-p (nucleus sampling)",
)

# Chat UI wired to the streaming `respond` generator.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[_max_tokens_slider, _temperature_slider, _top_p_slider],
)

if __name__ == "__main__":
    demo.launch()