create app.py
app.py
ADDED
@@ -0,0 +1,42 @@
+import os
+
+import streamlit as st
+import llama_cpp
+
+
+# Cache the model so Streamlit does not reload it on every rerun.
+@st.cache_resource
+def load_model(model_path):
+    return llama_cpp.Llama(model_path=model_path)
+
+
+st.title("Cybersecurity Assistant")
+
+model_path = "./llama-3-3b-cybersecurity-quantized.gguf"
+temperature = st.sidebar.slider('Temperature', 0.0, 2.0, 0.2, step=0.1)
+
+llm = None
+if os.path.exists(model_path):
+    try:
+        llm = load_model(model_path)
+        st.sidebar.success('Model loaded successfully')
+    except Exception as e:
+        st.sidebar.error(f'Error loading the model: {e}')
+else:
+    st.warning("Model path not found")
+
+# System prompt prepended to every user message.
+instruction = ('You are a Cybersecurity AI Assistant, glad to answer '
+               'questions related to Cybersecurity, particularly LLM Security.')
+
+if llm:
+    user_input = st.text_input("Your message", "")
+    if user_input:
+        # Build the full prompt from the system instruction and the user message.
+        prompt = f'{instruction}\n\nUser: {user_input}\nAI:'
+        with st.spinner("Generating response..."):
+            try:
+                response = llm(prompt, temperature=temperature, max_tokens=256)
+                st.write(f"**Cybersecurity Assistant:** {response['choices'][0]['text'].strip()}")
+            except Exception as e:
+                st.error(f"Error in Generation: {e}")