from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import re

# Load the saved model and tokenizer
model_checkpoint = "24NLPGroupO/EmailGeneration"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)  # truncation is a per-call option; it is applied in the generator call below, not at load time
model = AutoModelForCausalLM.from_pretrained(model_checkpoint)

# Set up the text-generation pipeline
generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
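# The pipeline returns a list of dicts, one per returned sequence, each with a
# 'generated_text' key; generate_email below relies on that shape when ranking candidates.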
def clean_generated_text(text):
    # Basic cleaning
    text = re.sub(r'^(Re:|Fwd:)', '', text)  # Remove reply/forward prefixes
    text = re.sub(r'Best regards,.*$', '', text, flags=re.DOTALL)  # Drop the signature and everything after it
    text = re.sub(r'PHONE.*$', '', text, flags=re.DOTALL)  # Drop everything from a phone-number placeholder onward
    text = re.sub(r'Email:.*$', '', text, flags=re.DOTALL)  # Drop everything from an email address onward
    text = re.sub(r'Cc:.*$', '', text, flags=re.DOTALL)  # Drop the CC list
    text = re.sub(r'\* Attachments:.*', '', text, flags=re.S)  # Drop 'Attachments:' and everything after it
    text = re.sub(r'©️ .*$', '', text, flags=re.DOTALL)  # Drop copyright and ownership statements
    text = re.sub(r'URL If this message is not displaying properly, click here.*$', '', text, flags=re.DOTALL)  # Drop display-error notices and links
    text = re.sub(r'\d{5,}', 'NUMBER', text)  # Mask long digit runs, likely phone numbers or ZIP codes
    return text.strip()
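# Illustration only (the sample string below is our assumption, not real model output):
#   clean_generated_text("Re: Great offer! Best regards,\nThe Team")
# returns "Great offer!" — the Re: prefix is stripped, and the signature
# plus everything after it is removed.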
def generate_email(product, gender, profession, hobby):
    input_text = f"{product} {gender} {profession} {hobby}"
    result = generator(
        input_text,              # Prompt that sets the context/topic for generation
        max_length=256,          # Maximum total length (prompt + generation) in tokens
        do_sample=True,          # Enable stochastic sampling so outputs vary between calls
        top_k=20,                # Consider only the 20 most likely next tokens at each step
        top_p=0.6,               # Nucleus sampling: restrict to the smallest token set covering 60% of the probability mass
        temperature=0.4,         # Scale logits before sampling; lower values give more deterministic output
        repetition_penalty=1.5,  # Penalize already-generated tokens to reduce repetition
        truncation=True,         # Truncate the prompt if it exceeds the model's maximum input length
        num_return_sequences=3   # Generate three candidate sequences to choose from
    )
    # Select the longest cleaned output among the generated candidates
    best_text = max((clean_generated_text(r['generated_text']) for r in result), key=len)
    return best_text
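# Minimal usage sketch; the argument values below are illustrative assumptions,
# not taken from the source.
if __name__ == "__main__":
    email = generate_email("electric bike", "female", "teacher", "hiking")
    print(email)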