# events/gen.py — generate nested-event JSON scenarios with StableLM-2 Zephyr 1.6B.
import torch
import sys
import sys
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the StableLM-2 Zephyr 1.6B chat model and its tokenizer from the
# Hugging Face Hub. device_map="auto" lets accelerate place the weights on
# whatever device is available (GPU when present, otherwise CPU).
tokenizer = AutoTokenizer.from_pretrained('stabilityai/stablelm-2-zephyr-1_6b')
model = AutoModelForCausalLM.from_pretrained(
    'stabilityai/stablelm-2-zephyr-1_6b',
    device_map="auto"
)
# System prompt: a one-shot example showing the desired nested-events JSON
# schema (event_number / name / description / probability / duration_days /
# recursive subevents), followed by an instruction (in Spanish, matching the
# model's expected input language) telling the model to produce a similar
# JSON for the user-supplied scenario and to reply with JSON only.
prompt = (
    "Genera un JSON que describa una serie de eventos consecutivos en un formato similar al siguiente:\n\n"
    "{\n"
    " \"events\": {\n"
    " \"event\": {\n"
    " \"event_number\": 1,\n"
    " \"name\": \"conflict_start\",\n"
    " \"description\": \"Tensions escalate between Iran and Israel\",\n"
    " \"probability\": 70,\n"
    " \"duration_days\": 30,\n"
    " \"subevents\": {\n"
    " \"event\": {\n"
    " \"event_number\": 2,\n"
    " \"name\": \"diplomatic_failure\",\n"
    " \"description\": \"Diplomatic negotiations fail\",\n"
    " \"probability\": 60,\n"
    " \"duration_days\": 15,\n"
    " \"subevents\": {\n"
    " \"event\": {\n"
    " \"event_number\": 3,\n"
    " \"name\": \"military_clash\",\n"
    " \"description\": \"Initial military clash at the border\",\n"
    " \"probability\": 50,\n"
    " \"duration_days\": 10,\n"
    " \"subevents\": {\n"
    " \"event\": [\n"
    " {\n"
    " \"event_number\": 4,\n"
    " \"name\": \"escalation\",\n"
    " \"description\": \"Conflict escalates into full-scale war\",\n"
    " \"probability\": 40,\n"
    " \"duration_days\": 180,\n"
    " \"subevents\": {\n"
    " \"event\": [\n"
    " {\n"
    " \"event_number\": 5,\n"
    " \"name\": \"regional_involvement\",\n"
    " \"description\": \"Other Middle Eastern countries get involved\",\n"
    " \"probability\": 30,\n"
    " \"duration_days\": 365,\n"
    " \"subevents\": {\n"
    " \"event\": [\n"
    " {\n"
    " \"event_number\": 6,\n"
    " \"name\": \"ceasefire\",\n"
    " \"description\": \"International powers broker a ceasefire\",\n"
    " \"probability\": 20,\n"
    " \"duration_days\": 30\n"
    " },\n"
    " {\n"
    " \"event_number\": 7,\n"
    " \"name\": \"prolonged_conflict\",\n"
    " \"description\": \"Conflict continues for over a year\",\n"
    " \"probability\": 50,\n"
    " \"duration_days\": 365\n"
    " }\n"
    " ]\n"
    " }\n"
    " },\n"
    " {\n"
    " \"event_number\": 8,\n"
    " \"name\": \"international_intervention\",\n"
    " \"description\": \"UN or other international organizations intervene\",\n"
    " \"probability\": 25,\n"
    " \"duration_days\": 60\n"
    " }\n"
    " ]\n"
    " }\n"
    " },\n"
    " {\n"
    " \"event_number\": 9,\n"
    " \"name\": \"containment\",\n"
    " \"description\": \"Conflict is contained and doesn't escalate\",\n"
    " \"probability\": 30,\n"
    " \"duration_days\": 90\n"
    " }\n"
    " ]\n"
    " }\n"
    " },\n"
    " \"event\": {\n"
    " \"event_number\": 10,\n"
    " \"name\": \"sanctions\",\n"
    " \"description\": \"Increased sanctions on Iran\",\n"
    " \"probability\": 70,\n"
    " \"duration_days\": 180,\n"
    " \"subevents\": {\n"
    " \"event\": [\n"
    " {\n"
    " \"event_number\": 11,\n"
    " \"name\": \"iran_retaliates\",\n"
    " \"description\": \"Iran retaliates with cyberattacks\",\n"
    " \"probability\": 40,\n"
    " \"duration_days\": 60\n"
    " },\n"
    " {\n"
    " \"event_number\": 12,\n"
    " \"name\": \"israel_response\",\n"
    " \"description\": \"Israel responds with targeted airstrikes\",\n"
    " \"probability\": 50,\n"
    " \"duration_days\": 60\n"
    " }\n"
    " ]\n"
    " }\n"
    " }\n"
    " }\n"
    " },\n"
    " \"event\": {\n"
    " \"event_number\": 13,\n"
    " \"name\": \"diplomatic_success\",\n"
    " \"description\": \"Successful diplomatic negotiations\",\n"
    " \"probability\": 40,\n"
    " \"duration_days\": 30,\n"
    " \"subevents\": {\n"
    " \"event\": [\n"
    " {\n"
    " \"event_number\": 14,\n"
    " \"name\": \"peace_agreement\",\n"
    " \"description\": \"Iran and Israel sign a peace agreement\",\n"
    " \"probability\": 20,\n"
    " \"duration_days\": 60\n"
    " },\n"
    " {\n"
    " \"event_number\": 15,\n"
    " \"name\": \"temporary_truce\",\n"
    " \"description\": \"A temporary truce is established\",\n"
    " \"probability\": 30,\n"
    " \"duration_days\": 30\n"
    " }\n"
    " ]\n"
    " }\n"
    " }\n"
    " }\n"
    " }\n"
    " }\n"
    "}\n\n"
    "Ahora, genera un JSON similar con eventos anidados, pero cambia los detalles y números para hacer que sea con el input que viene a continuacion, respondiendo solo el JSON: "
)
def generate(event):
    """Generate a nested-events JSON description for *event*.

    Builds a chat conversation (the module-level system ``prompt`` holding the
    JSON schema example, plus the user's event text), samples a completion
    from the model, and returns only the newly generated text.

    Args:
        event: Free-text description of the scenario to expand into JSON.

    Returns:
        The model's generated continuation as a string. It is intended to be
        JSON, but the output is not validated or parsed here.
    """
    prompt_msg = [
        {'role': 'system', 'content': prompt},
        {'role': 'user', 'content': event},
    ]
    inputs = tokenizer.apply_chat_template(
        prompt_msg,
        add_generation_prompt=True,
        return_tensors='pt'
    )
    tokens = model.generate(
        inputs.to(model.device),
        max_new_tokens=1024,
        temperature=0.5,
        do_sample=True
    )
    # Fix: model.generate returns prompt + completion; the original decoded
    # the whole sequence, so the returned "JSON" was prefixed with the entire
    # chat prompt. Slice off the prompt tokens and also drop special tokens
    # (e.g. <|endoftext|>) so the result is clean model output.
    new_tokens = tokens[0][inputs.shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)