import os
import openai
import json
import rdflib


class ExampleGenerator:
    def __init__(self):
        self.ontologies = {}           # filename -> raw ontology text
        self.ontology_files = []       # filenames that have been loaded
        self.rules = {}                # filename -> rule prompt string
        self.description = None        # natural-language deception plan description
        self.json = None               # JSON-LD version of the plan
        self.persona_descriptions = None

    def add_ontology(self, onto):
        """Load an ontology file and cache its text and rule prompt."""
        if onto in self.ontology_files:
            raise ValueError("Ontology file already exists.")
        onto_data = self.get_ontology_file(onto)
        if onto_data:
            self.ontology_files.append(onto)
            self.ontologies[onto] = onto_data
            self.rules[onto] = self.generate_rule(onto)
        else:
            raise ValueError("Ontology file error.")

    def get_ontology_file(self, filename):
        """Return the contents of an ontology file, or raise if it does not exist."""
        if os.path.isfile(filename):
            # The with-block closes the file automatically.
            with open(filename, 'r') as f:
                return f.read()
        raise ValueError("Invalid filename.")

    def ChatGPTTextSplitter(self, text):
        """Split text into smaller sub-blocks and yield them as prompts for the LLM."""
        prompt = f"""The total length of content that I want to send you is too large to send in only one piece.

    For sending you that content, I will follow this rule:

    [START PART 1/10]
    this is the content of the part 1 out of 10 in total
    [END PART 1/10]

    Then you just answer: "Instructions Sent."

    And when I tell you "ALL PARTS SENT", then you can continue processing the data and answering my requests.
        """
        if isinstance(text, str):
            textsize = 12000
            # Ceiling division so the trailing characters of the text are not dropped.
            blocksize = -(-len(text) // textsize)
            if blocksize > 1:
                yield prompt

                for b in range(1, blocksize + 1):
                    if b < blocksize:
                        prompt = f"""Do not answer yet. This is just another part of the text I want to send you. Just receive and acknowledge as "Part {b}/{blocksize} received" and wait for the next part.
                [START PART {b}/{blocksize}]
                {text[(b-1)*textsize:b*textsize]}
                [END PART {b}/{blocksize}]
                Remember not answering yet. Just acknowledge you received this part with the message "Part {b}/{blocksize} received" and wait for the next part.
                        """
                        yield prompt
                    else:
                        # Final part: signal that the whole payload has been sent.
                        prompt = f"""
                [START PART {b}/{blocksize}]
                {text[(b-1)*textsize:b*textsize]}
                [END PART {b}/{blocksize}]
                ALL PARTS SENT. Now you can continue processing the request.
                        """
                        yield prompt
            else:
                # Short enough to send in a single request.
                yield text
        elif isinstance(text, list):
            yield prompt

            for n, block in enumerate(text):
                if n + 1 < len(text):
                    prompt = f"""Do not answer yet. This is just another part of the text I want to send you. Just receive and acknowledge as "Part {n+1}/{len(text)} received" and wait for the next part.
            [START PART {n+1}/{len(text)}]
            {block}
            [END PART {n+1}/{len(text)}]
            Remember not answering yet. Just acknowledge you received this part with the message "Part {n+1}/{len(text)} received" and wait for the next part.
                    """
                else:
                    prompt = f"""
            [START PART {n+1}/{len(text)}]
            {block}
            [END PART {n+1}/{len(text)}]
            ALL PARTS SENT. Now you can continue processing the request.
                    """
                yield prompt
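    # Usage sketch for ChatGPTTextSplitter (illustrative note, not original code): with the
    # 12,000-character chunk size, a 30,000-character ontology string yields four prompts in
    # order: the splitting instructions, parts 1/3 and 2/3 asking only for an acknowledgement,
    # and part 3/3 ending with "ALL PARTS SENT". A string that fits in one chunk is yielded
    # unchanged.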

    def send_ontology(self):
        """Concatenate all loaded ontologies and send them to the LLM in parts."""
        if len(self.ontologies) > 0:
            ontology = "\n".join(self.ontologies.values()) + "\n"
            print("Sending Ontology in Parts")
            for i in self.ChatGPTTextSplitter(ontology):
                print(self.llm_api(i))
        else:
            raise ValueError("No loaded ontology to send.")

    def llm_api(self, prompt, model="gpt-3.5-turbo"):
        """Send a single user message to the chat model (legacy openai<1.0 interface)."""
        messages = [{
            "role": "user",
            "content": prompt
        }]
        res = openai.ChatCompletion.create(model=model, messages=messages, temperature=0)
        return res.choices[0].message['content']
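    # Note: llm_api() above targets the legacy openai<1.0 SDK. A minimal sketch of the
    # equivalent call with the openai>=1.0 client (an assumption about the environment,
    # not part of the original code) would be:
    #
    #     from openai import OpenAI
    #     client = OpenAI()  # reads OPENAI_API_KEY from the environment
    #     res = client.chat.completions.create(model=model, messages=messages, temperature=0)
    #     return res.choices[0].message.content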
    
    def generate_rule(self, onto=None):
        """Raw natural-language rule string for the Adversary Engagement Ontology (AEO)."""
        v = """These are the components that construct the plan: 
Fake personas - first and last name
People can have one of the following DeceptionRoles:
        adversary
        defender
Identities can take one of the following DeceptionActions:
        engagement:Access - subject is an identity, predicate is an object
        engagement:Alert - subject is an identity, predicate is a human identity
        engagement:Beacon - subject is non-human identities, services, or tools, predicate can be a server, service, tool or an identity
        engagement:Deploy - subject is human or agent, the predicate must be a DeceptionObject
        engagement:Obfuscate
        engagement:Respond
There can be one or more DeceptionObjects:
        engagement:Honeypot
        engagement:Honeytoken
        engagement:Breadcrumb
        engagement:BreadcrumbTrail - this is a set of breadcrumbs
        engagement:LureObject
        engagement:HoneyObject
        engagement:Decoy
        engagement:DataSource
A defender that performs a DeceptionAction has at least one of the following DefenderObjectives
            objective:Reconnaissance
            objective:Affect
            objective:Collect
            objective:Detect
            objective:Direct
            objective:Disrupt
            objective:Elicit
            objective:Expose
            objective:Motivate
            objective:Plan
            objective:Prepare
            objective:Prevent
            objective:Reassure
            objective:Analyze
            objective:Deny
            objective:ElicitBehavior
            objective:Lure
            objective:TimeSink
            objective:Track
            objective:Trap
An adversary that performs a DeceptionAction has at least one of the following AdversaryObjectives
            objective:CommandAndControl
            objective:CredentialAccess
            objective:DevelopResource
            objective:Discover
            objective:EscalatePrivilege
            objective:Evade
            objective:Execute
            objective:Exfilitrate
            objective:GainInitialAccess
            objective:Impact
            objective:MoveLaterally
            objective:Persist
            objective:Reconnaissance
Generate the plan in the following structure:
print "Use one engagement:Narrative"
print "Use one engagement:Storyline"
print "Use the following people:"
    Enumerate each person's name and DeceptionRole
    
Each planned event is centered around a DeceptionAction taken by some identity or person with a DeceptionRole onto a DeceptionObject or Object. Enumerate each planned event, where each planned event has a short description and a number starting with "Planned Event 1". Describe which person deploys a DeceptionObject, what DeceptionAction they used, and what the DefenderObjective or AdversaryObjective of the action or deployed DeceptionObject is. The last planned event should conclude that a defender has been alerted that the DeceptionObject was accessed by the adversary.

Remember to use only the given DefenderObjectives, AdversaryObjectives, DeceptionObjects, DeceptionActions, and DeceptionRoles. Do not use any objectives, objects, actions, or roles other than those provided. If a person uses an action not from DeceptionActions, then the action is "uco-core:Action" with the name of the action.
        """
        return v

    def generate_json_rule(self, onto=None):
        """Raw JSON-LD structuring rule string for the Adversary Engagement Ontology (AEO)."""
        v = """Remember to make a JSON-LD format example that only uses class and property terms from the Adversary Engagement Ontology and the Unified Cyber Ontology.

Each engagement:Narrative has property:
    engagement:hasStoryline connects to an engagement:Storyline
Each engagement:Storyline has property:
    engagement:hasEvent connects to a uco-types:Thread
Each uco-types:Thread has properties:
    co:element contains all engagement:PlannedEvents
    co:item contains all uco-types:ThreadItem, one for each engagement:PlannedEvent.
    co:size
    uco-types:threadOriginItem is the uco-types:ThreadItem for the first engagement:PlannedEvent
    uco-types:threadTerminalItem is the uco-types:ThreadItem for the last engagement:PlannedEvent
Each co:size has properties:
    @type as xsd:nonNegativeInteger
    @value which is the number of uco-types:ThreadItem
Each uco-types:ThreadItem has property:
    co:itemContent is the engagement:PlannedEvent
    optional uco-types:threadNextItem is the next uco-types:ThreadItem for the next engagement:PlannedEvent if there is one,
    optional uco-types:threadPreviousItem is the previous uco-types:ThreadItem for the previous engagement:PlannedEvent if there is one
Each engagement:PlannedEvent has property:
    engagement:eventContext connects to one engagement action whose @type is one of the following:
        engagement:Access
        engagement:Alert
        engagement:Beacon
        engagement:Deploy
        engagement:Obfuscate
        engagement:Respond
Each engagement action has properties:
    @type is the action
    uco-core:performer
    uco-core:object connects to one of the following engagement deception objects, denoted "EDO" objects:
        engagement:Honeypot
        engagement:Honeytoken
        engagement:Breadcrumb
        engagement:BreadcrumbTrail
        engagement:LureObject
        engagement:HoneyObject
        engagement:Decoy
        engagement:DataSource
Each "EDO" object has properties:
    engagement:hasCharacterization connects to a uco-core:UcoObject
    objective:hasObjective with @type objective:Objective and @id with one of the following instances:
            objective:CommandAndControl
            objective:CredentialAccess
            objective:DevelopResource
            objective:Discover
            objective:EscalatePrivilege
            objective:Evade
            objective:Execute
            objective:Exfilitrate
            objective:GainInitialAccess
            objective:Impact
            objective:MoveLaterally
            objective:Persist
            objective:Reconnaissance
            objective:Affect
            objective:Collect
            objective:Detect
            objective:Direct
            objective:Disrupt
            objective:Elicit
            objective:Expose
            objective:Motivate
            objective:Plan
            objective:Prepare
            objective:Prevent
            objective:Reassure
            objective:Analyze
            objective:Deny
            objective:ElicitBehavior
            objective:Lure
            objective:TimeSink
            objective:Track
            objective:Trap
        uco-core:name is the objective
All people have property:
    @type is uco-identity:Person
    uco-core:hasFacet that connects to one of the following: 
        uco-identity:SimpleNameFacet which has the property:
            uco-identity:familyName
            uco-identity:givenName
Each uco-core:Role has properties:
        @id is the role
        uco-core:name is the role
For each uco-core:Role there is a uco-core:Relationship with properties:
    uco-core:kindOfRelationship is "has_Role"
    uco-core:source connects to the person who has the role
    uco-core:target connects to uco-core:Role
Each engagement:BreadcrumbTrail has property:
    engagement:hasBreadcrumb connects to uco-types:Thread
        This uco-types:Thread has property:
            co:element contains all engagement:Breadcrumb that belong to this engagement:BreadcrumbTrail
            co:item contains all uco-types:ThreadItem, one for each engagement:Breadcrumb
            co:size
            uco-types:threadOriginItem is the uco-types:ThreadItem for the first engagement:Breadcrumb belonging to this engagement:BreadcrumbTrail
            uco-types:threadTerminalItem is the uco-types:ThreadItem for the last engagement:Breadcrumb belonging to this engagement:BreadcrumbTrail
Each engagement:Breadcrumb has the properties:
    engagement:hasCharacterization which connects to a uco-core:UcoObject with the property:
        uco-core:description which describes the object characterizing the breadcrumb
All classes must include property:
    @type is the class
    @id is a unique identifier
    
If namespace "engagement" prefix is used then https://ontology.adversaryengagement.org/ae/engagement#
If namespace "objective" prefix is used then https://ontology.adversaryengagement.org/ae/objective#
If namespace "role" prefix is used then https://ontology.adversaryengagement.org/ae/role#
If namespace "identity" prefix is used then https://ontology.adversaryengagement.org/ae/identity#
If namespace "uco-core" prefix is used then https://ontology.unifiedcyberontology.org/uco/core#
If namespace "uco-types" prefix is used then https://ontology.unifiedcyberontology.org/uco/types#
If namespace "uco-role" prefix is used then https://ontology.unifiedcyberontology.org/uco/role#
        """
        return v
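    # Illustrative only (not original code): a hand-written miniature of the JSON-LD shape
    # that the rules in generate_json_rule() describe. The "kb" prefix and all @id values
    # are placeholders.
    #
    #     {
    #       "@context": {
    #         "kb": "http://example.org/kb/",
    #         "engagement": "https://ontology.adversaryengagement.org/ae/engagement#",
    #         "uco-core": "https://ontology.unifiedcyberontology.org/uco/core#"
    #       },
    #       "@graph": [
    #         {"@id": "kb:narrative-1", "@type": "engagement:Narrative",
    #          "engagement:hasStoryline": {"@id": "kb:storyline-1"}},
    #         {"@id": "kb:event-1", "@type": "engagement:PlannedEvent",
    #          "engagement:eventContext": {"@id": "kb:deploy-1"}},
    #         {"@id": "kb:deploy-1", "@type": "engagement:Deploy",
    #          "uco-core:performer": {"@id": "kb:person-1"},
    #          "uco-core:object": {"@id": "kb:honeytoken-1"}}
    #       ]
    #     }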
    
    def generate_continue(self):
        """Prompt asking the model to continue a reply that was cut off."""
        return "continue"
    
    def raw_prompt(self, description, jsn=True):
        """Build the full prompt (scenario plus rules), send it in parts, and return the reply."""
        def run(val, jsn):
            if jsn:
                prompt = f"""Give me a full json-ld format example for the following scenario:
                {description}

                {"".join(val)}
                """
            else:
                prompt = f"""
                {"".join(val)}
                {description}
                """
            res = None
            # Only the reply to the final chunk contains the actual answer.
            for i in self.ChatGPTTextSplitter(prompt):
                res = self.llm_api(i)
            return res

        if not jsn:
            return run(self.generate_rule(), jsn)
        else:
            res_val = run(self.generate_json_rule(), jsn)
            try:
                return json.loads(res_val)
            except (json.JSONDecodeError, TypeError):
                # The response was likely cut off; ask the model to continue and retry.
                # llm_api() sends no conversation history, so the bare "continue" prompt
                # may not resume the earlier reply; the retries are capped so the loop
                # cannot run forever (the cap of 5 is an arbitrary choice).
                data = [res_val]
                for _ in range(5):
                    data.append(self.llm_api(self.generate_continue()))
                    try:
                        return json.loads("".join(data))
                    except (json.JSONDecodeError, TypeError):
                        pass

            return None
    
    def get_ns(self, string):
        """Return the namespace prefix of a prefixed term, e.g. "engagement:Decoy" -> "engagement"."""
        return string.split(":")[0]
    
    def auto_generate(self, planSize: int = 3, keywords: str = "", nkeywords: str = ""):
        """Generate a deception plan description, then convert it to JSON-LD."""
        p = f"Generate a deception plan with {planSize} PlannedEvents."
        if keywords:
            p += f" The plan must include specifically {keywords}."
        if nkeywords:
            p += f" Do not include {nkeywords}."
        # deception plan description
        e = self.prompt(p, jsn=False)
        self.description = e
        # deception plan converted into json
        w = self.prompt(e, jsn=True)
        self.json = w
        return self.json
    
    def generate_personas(self):
        """Generate LinkedIn-style persona profiles for the people in the current plan."""
        if not self.description:
            raise ValueError("Run auto_generate() or prompt() before attempting to generate persona profiles.")
        description = f"""{self.description}
        Generate a believable, detailed, and professional description of each person as if they had a LinkedIn profile with the following configurations:
            Name
            Occupation and occupation job title
            Gender
            A random configuration of the Big Five personality traits that fits their occupation and one of the 16 Myers-Briggs personality classifications. Do not disclose their configuration or their personality type; write as if they wrote an introduction about themselves and their accomplishments.
            Education history including graduation year, school, degree
            Job history with at least 10 active years
            Awards that highlight the fictitious character's strengths
            A schedule of their activities and active behaviors at work

"""
        w = self.prompt(description, jsn=False)
        self.persona_descriptions = w
        return w

    
    def prompt(self, description, jsn=True):
        """Send a scenario description to the LLM; for JSON output, trim @context to the prefixes actually used."""
        if not self.description:
            self.description = description
        if not jsn:
            res = self.raw_prompt(description, jsn=jsn)
        else:
            res = self.raw_prompt(description)

            # include only relevant namespaces in @context
            prefixes = []

            def collect_prefixes(node):
                """Record the namespace prefix of every @type found in the JSON-LD tree."""
                if isinstance(node, list):
                    for item in node:
                        collect_prefixes(item)
                elif isinstance(node, dict):
                    for value in node.values():
                        # Recurse into nested objects and arrays.
                        if isinstance(value, (dict, list)):
                            collect_prefixes(value)
                    if '@type' in node:
                        prefixes.append(self.get_ns(node['@type']))

            if isinstance(res, dict) and '@graph' in res:
                collect_prefixes(res['@graph'])

                new_prefixes = {}
                for prefix in set(prefixes):
                    if prefix in res.get('@context', {}):
                        new_prefixes[prefix] = res['@context'][prefix]

                res['@context'] = new_prefixes

        return res
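

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes a valid
# OPENAI_API_KEY in the environment; "aeo_example.ttl" is a hypothetical
# ontology filename used only for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    openai.api_key = os.environ.get("OPENAI_API_KEY")

    gen = ExampleGenerator()

    # Optionally load and send an ontology file so the model sees the raw AEO text.
    # gen.add_ontology("aeo_example.ttl")   # hypothetical path
    # gen.send_ontology()

    # Generate a three-event deception plan and its JSON-LD representation.
    plan_jsonld = gen.auto_generate(planSize=3, keywords="engagement:Honeytoken")
    print(json.dumps(plan_jsonld, indent=2))

    # Generate LinkedIn-style persona descriptions for the people in the plan.
    print(gen.generate_personas())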