import json
import os

import openai
import rdflib

class LureObject:
    """Container for a single generated lure and its metadata."""

    def __init__(self):
        self.lure_name = ""
        self.type = ""
        self.content = ""
        self.json = None      # raw JSON example returned by the LLM
        self.userRole = ""    # role the lure is tailored to


class LureGenerator:
    """Generates deception lures (honeytokens, honeypots, honeyfiles) with an LLM."""

    def __init__(self):
        self.lure = []    # LureObject instances produced by generate()
        self.sketch = ""  # raw LLM response kept for inspection

    def ChatGPTTextSplitter(self, text):
        """Splits text into smaller sub-blocks that fit into a single LLM prompt."""
        prompt = """The total length of the content that I want to send you is too large to send in only one piece.

To send you that content, I will follow this rule:

[START PART 1/10]
this is the content of part 1 out of 10 in total
[END PART 1/10]

Then you just answer: "Instructions Sent."

And when I tell you "ALL PARTS SENT", then you can continue processing the data and answering my requests.
"""
        if isinstance(text, str):
            textsize = 12000
            if len(text) > textsize:
                # Ceiling division so the tail of the text is not dropped.
                blocksize = -(-len(text) // textsize)
                yield prompt

                for b in range(1, blocksize + 1):
                    chunk = text[(b - 1) * textsize:b * textsize]
                    if b < blocksize:
                        prompt = f"""Do not answer yet. This is just another part of the text I want to send you. Just receive and acknowledge as "Part {b}/{blocksize} received" and wait for the next part.
[START PART {b}/{blocksize}]
{chunk}
[END PART {b}/{blocksize}]
Remember not to answer yet. Just acknowledge you received this part with the message "Part {b}/{blocksize} received" and wait for the next part.
"""
                        yield prompt
                    else:
                        prompt = f"""
[START PART {b}/{blocksize}]
{chunk}
[END PART {b}/{blocksize}]
ALL PARTS SENT. Now you can continue processing the request.
"""
                        yield prompt
            else:
                # Short texts fit into a single prompt and are passed through unchanged.
                yield text

        elif isinstance(text, list):
            yield prompt

            for n, block in enumerate(text):
                if n + 1 < len(text):
                    prompt = f"""Do not answer yet. This is just another part of the text I want to send you. Just receive and acknowledge as "Part {n + 1}/{len(text)} received" and wait for the next part.
[START PART {n + 1}/{len(text)}]
{block}
[END PART {n + 1}/{len(text)}]
Remember not to answer yet. Just acknowledge you received this part with the message "Part {n + 1}/{len(text)} received" and wait for the next part.
"""
                    yield prompt
                else:
                    prompt = f"""
[START PART {n + 1}/{len(text)}]
{block}
[END PART {n + 1}/{len(text)}]
ALL PARTS SENT. Now you can continue processing the request.
"""
                    yield prompt

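    # Illustrative note (hypothetical sizes): for a 30,000-character string the
    # generator above yields the framing prompt first, then parts 1/3 and 2/3 with
    # "Part x/3 received" acknowledgement instructions, and finally part 3/3 ending
    # in "ALL PARTS SENT." so the model knows it may now answer the actual request.
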
    def llm_api(self, prompt, model="gpt-3.5-turbo"):
        """Sends a single prompt to the chat model and returns the reply text.

        Uses the legacy openai<1.0 ChatCompletion interface.
        """
        messages = [{
            "role": "user",
            "content": prompt
        }]
        res = openai.ChatCompletion.create(model=model, messages=messages, temperature=0)
        return res.choices[0].message['content']

    def generate_rule(self, deceptionObject, role):
        """Builds the prompt that asks the LLM for lure examples in JSON format."""
        v = f"""Generate examples of {deceptionObject} that would be perceived as valuable by an adversary about a person who has the role {role} and lure them to a specific location on the network. Generate json-format objects from the examples and return a json-format object containing all json-format objects.
"""
        return v

    def generate_rule2(self, deceptionObject, role, jsn):
        """Builds the prompt that asks the LLM for the detailed content of one lure."""
        v = f"""Generate the detailed contents of an example of what an adversary would see if they accessed this {deceptionObject}: {jsn}
"""
        return v

    def generate_continue(self):
        """Builds a follow-up prompt asking the LLM to continue a truncated answer."""
        v = """
continue
"""
        return v

    def raw_prompt(self, LureType, Role):
        """Asks the LLM to sketch lure examples and returns its final response."""
        def run(val):
            prompt = "".join(val)
            res = None
            # Only the response to the last chunk (after "ALL PARTS SENT") is kept.
            for i in self.ChatGPTTextSplitter(prompt):
                res = self.llm_api(i)
            return res
        return run(self.generate_rule(LureType, Role))

    def raw_content(self, LureType, Role, jsn):
        """Asks the LLM for the detailed content of a single lure example."""
        def run(val):
            prompt = "".join(val)
            res = None
            # Only the response to the last chunk (after "ALL PARTS SENT") is kept.
            for i in self.ChatGPTTextSplitter(prompt):
                res = self.llm_api(i)
            return res
        return run(self.generate_rule2(LureType, Role, jsn))

    def generate(self, LureType: str, Role: str = ""):
        """Generates lures of the given type for the given user role."""
        assert LureType in ['honeytoken', 'honeypot', 'honeyfile']
        res = self.raw_prompt(LureType, Role)

        # Keep the raw response so it can be inspected if parsing fails.
        self.sketch = res

        try:
            jsn = json.loads(res)
        except (json.JSONDecodeError, TypeError) as exc:
            raise ValueError("Failed to parse json-format.") from exc

        key = list(jsn.keys())
        if len(key) == 1:
            for n, example in enumerate(list(jsn[key[0]])):
                lure = LureObject()
                lure.json = example
                lure.lure_name = key[0] + "_" + str(n)
                lure.content = self.raw_content(LureType, Role, example)
                lure.type = LureType
                lure.userRole = Role

                self.lure.append(lure)

        return self.lure
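
# Hypothetical usage sketch (not part of the original module). It assumes a valid
# OpenAI API key is available, e.g. exported as OPENAI_API_KEY, and that the model
# returns parseable JSON; adjust the lure type and role to your own environment.
if __name__ == "__main__":
    openai.api_key = os.environ.get("OPENAI_API_KEY", "")
    generator = LureGenerator()
    lures = generator.generate("honeytoken", Role="database administrator")
    for lure in lures:
        print(lure.lure_name, lure.type)
        print(lure.content)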