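# app.py: DLP Chat Assistant, a Gradio chat UI around the
# yasserrmd/SmolLM2-135M-synthetic-dlp model, written to run on a
# Hugging Face Space (the `spaces` package supplies the GPU decorator).
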
import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import spaces
import torch

# Load the model in bfloat16 on the GPU
model_name = "yasserrmd/SmolLM2-135M-synthetic-dlp"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="cuda"
)
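# Note: device_map="cuda" assumes a CUDA device and the accelerate package;
# on a CPU-only machine, drop device_map and use torch.float32 instead.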
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Wrap the model and tokenizer in a text-generation pipeline
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer
)
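
# Note: recent transformers releases let text-generation pipelines accept a
# list of {"role": ..., "content": ...} messages and apply the tokenizer's
# chat template automatically; older versions need a manually built prompt.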

# System prompt sent with every request
SYSTEM_PROMPT = (
    "You are a Data Loss Prevention (DLP) assistant designed to help users "
    "with questions and tasks related to data security, compliance, and "
    "policy enforcement. Respond concisely and professionally, offering "
    "practical guidance while ensuring clarity. If additional context or "
    "follow-up questions are required, ask the user to refine their input "
    "or provide specific examples."
)

@spaces.GPU
def chat_assistant(chat_history, user_input):
    """Generate a response from the user input and the accumulated chat history."""
    # Rebuild the conversation in chat-message format: system prompt first,
    # then the prior (user, assistant) turns, then the new user message
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    for user_msg, assistant_msg in chat_history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": user_input})

    response = generator(
        messages, max_new_tokens=512, return_full_text=False
    )[0]["generated_text"]

    # Append the new turn as a (user, assistant) pair, the format gr.Chatbot expects
    chat_history.append((user_input, response))

    # Return the updated history for both the visible chat and the stored state
    return chat_history, chat_history

def reset_chat():
    """Clear both the visible chat and the stored history."""
    return [], []

# Gradio Interface
with gr.Blocks() as dlp_chat_app:
    gr.Markdown("""### DLP Chat Assistant\nAsk your questions about Data Loss Prevention (DLP).
    """)

    # Session-scoped chat history, shared by the send and reset handlers
    chat_state = gr.State([])

    with gr.Row():
        chat_box = gr.Chatbot(
            label="Chat History",
            placeholder="Assistant responses will appear here...",
        )

    user_input = gr.Textbox(
        label="Your Input",
        placeholder="Type your message here...",
        lines=1
    )

    send_button = gr.Button("Send")
    reset_button = gr.Button("Reset Chat")

    send_button.click(
        fn=chat_assistant,
        inputs=[chat_state, user_input],
        outputs=[chat_box, chat_state]
    )

    reset_button.click(
        fn=reset_chat,
        inputs=[],
        outputs=[chat_box, chat_state]
    )

# Launch the app
dlp_chat_app.launch(debug=True)
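
# To try this locally (assuming gradio, transformers, torch, accelerate, and
# the `spaces` package are installed), run `python app.py` and open the URL
# Gradio prints; outside a Space, the @spaces.GPU decorator is a passthrough.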