karimaloulou committed on
Commit
a03397e
·
verified ·
1 Parent(s): 8105b4c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -24
app.py CHANGED
@@ -1,11 +1,9 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
- import os
4
  from mitreattack.stix20 import MitreAttackData
5
  from descriptions import descriptions # Assurez-vous que descriptions.py est dans le même répertoire
6
 
7
  # Chemins des fichiers JSON
8
- ics_attack_path = 'ics-attack.json'
9
  enterprise_attack_path = 'enterprise-attack.json'
10
 
11
  # Charger les données ATT&CK
@@ -23,14 +21,7 @@ def generate_system_message(log_input):
23
  description_output = descriptions(log_input)
24
  return f"""<s>[INST] Given these TTPs: {techniques_str}\n\n and here are the descriptions: {description_output}\n\nFigure out which technique is used in these logs and respond in bullet points and nothing else.[/INST]"""
25
 
26
- def respond(
27
- message,
28
- history: list[tuple[str, str]],
29
- system_message,
30
- max_tokens,
31
- temperature,
32
- top_p,
33
- ):
34
  messages = [{"role": "system", "content": system_message}]
35
 
36
  for val in history:
@@ -40,8 +31,6 @@ def respond(
40
  messages.append({"role": "assistant", "content": val[1]})
41
 
42
  messages.append({"role": "user", "content": message})
43
- message_content = message
44
-
45
  response = ""
46
 
47
  for message in client.chat_completion(
@@ -52,15 +41,18 @@ def respond(
52
  top_p=top_p,
53
  ):
54
  token = message.choices[0].delta.content
55
-
56
  response += token
57
  yield response
58
 
 
 
 
 
59
  demo = gr.ChatInterface(
60
  respond,
61
  additional_inputs=[
62
- gr.Textbox(label="Log Input", placeholder="Enter log here..."),
63
- gr.Textbox(label="System message", value="", interactive=False),
64
  gr.Slider(minimum=1, maximum=2048, value=1024, step=1, label="Max new tokens"),
65
  gr.Slider(minimum=0.1, maximum=1.0, value=0.1, step=0.1, label="Temperature"),
66
  gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
@@ -69,19 +61,19 @@ demo = gr.ChatInterface(
69
  description="Enter logs to detect TTPs using the model.",
70
  )
71
 
72
- # Update system message based on log input
73
  def update_system_message(log_input):
74
  return generate_system_message(log_input)
75
 
76
- demo.additional_inputs[1].update(value=gr.update(value=generate_system_message("")))
 
 
 
77
 
78
- # Event handler for log input
79
- def on_log_input_change(log_input):
80
- system_message = generate_system_message(log_input)
81
- demo.additional_inputs[1].update(value=gr.update(value=system_message))
82
-
83
- # Bind event handler
84
- demo.additional_inputs[0].change(on_log_input_change)
85
 
86
  if __name__ == "__main__":
87
  demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
3
  from mitreattack.stix20 import MitreAttackData
4
  from descriptions import descriptions # Assurez-vous que descriptions.py est dans le même répertoire
5
 
6
  # Chemins des fichiers JSON
 
7
  enterprise_attack_path = 'enterprise-attack.json'
8
 
9
  # Charger les données ATT&CK
 
21
  description_output = descriptions(log_input)
22
  return f"""<s>[INST] Given these TTPs: {techniques_str}\n\n and here are the descriptions: {description_output}\n\nFigure out which technique is used in these logs and respond in bullet points and nothing else.[/INST]"""
23
 
24
+ def respond(message, history, system_message, max_tokens, temperature, top_p):
 
 
 
 
 
 
 
25
  messages = [{"role": "system", "content": system_message}]
26
 
27
  for val in history:
 
31
  messages.append({"role": "assistant", "content": val[1]})
32
 
33
  messages.append({"role": "user", "content": message})
 
 
34
  response = ""
35
 
36
  for message in client.chat_completion(
 
41
  top_p=top_p,
42
  ):
43
  token = message.choices[0].delta.content
 
44
  response += token
45
  yield response
46
 
47
def on_log_input_change(log_input):
    """Return the system prompt derived from the current log input.

    Thin event-handler wrapper around ``generate_system_message`` so the
    UI can refresh the system-message textbox when the logs change.
    """
    return generate_system_message(log_input)
50
+
51
  demo = gr.ChatInterface(
52
  respond,
53
  additional_inputs=[
54
+ gr.Textbox(label="Log Input", placeholder="Enter log here...", lines=4),
55
+ gr.Textbox(value=generate_system_message(""), label="System message", interactive=False),
56
  gr.Slider(minimum=1, maximum=2048, value=1024, step=1, label="Max new tokens"),
57
  gr.Slider(minimum=0.1, maximum=1.0, value=0.1, step=0.1, label="Temperature"),
58
  gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
 
61
  description="Enter logs to detect TTPs using the model.",
62
  )
63
 
64
# Regenerate the system prompt whenever the log input changes.
def update_system_message(log_input):
    """Return the system message generated from ``log_input``."""
    prompt = generate_system_message(log_input)
    return prompt
67
 
68
# Push the regenerated system message back into the interface.
def interface_update(log_input, *args):
    """Return a Gradio ``update`` carrying the system message for ``log_input``.

    Extra positional arguments forwarded by the event binding are accepted
    and deliberately ignored.
    """
    message = update_system_message(log_input)
    return gr.update(value=message)
72
 
73
+ # Associe l'entrée des logs à la fonction de mise à jour
74
+ demo.add_component(
75
+ gr.Textbox(label="Log Input", placeholder="Enter log here...", lines=4, change=interface_update)
76
+ )
 
 
 
77
 
78
  if __name__ == "__main__":
79
  demo.launch()