from transformers import AutoTokenizer, T5ForConditionalGeneration

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = T5ForConditionalGeneration.from_pretrained("t5-base")

# Prompt focused on blockchain security and secure DevOps
prompt = "I am a neural network trained on the task of generating advice for improving blockchain security in secure development environments. Here are my recommendations: "

# Increase max tokens for longer outputs
max_length = 1024

# Tokenize the prompt; generate() expects token IDs, not a raw string
input_ids = tokenizer(prompt, return_tensors="pt").input_ids

# Generate output (do_sample=True is required for temperature to take effect)
generated_output = model.generate(
    input_ids,
    max_length=max_length,
    num_return_sequences=1,
    return_dict_in_generate=True,
    do_sample=True,
    temperature=0.7,
)

# Decode the generated token IDs back into text
print(tokenizer.decode(generated_output.sequences[0], skip_special_tokens=True))
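
If several candidate recommendations are wanted, num_return_sequences can be raised and the resulting sequences batch-decoded. A minimal sketch, assuming the tokenizer, model, input_ids, and max_length defined above; the sampling settings are illustrative:

# Sample three candidate outputs and decode them all
outputs = model.generate(
    input_ids,
    max_length=max_length,
    num_return_sequences=3,
    do_sample=True,
    temperature=0.7,
    return_dict_in_generate=True,
)
for i, text in enumerate(tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)):
    print(f"--- candidate {i} ---")
    print(text)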