```python
# Load the model and tokenizer directly
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

tokenizer = AutoTokenizer.from_pretrained("Chillyblast/Bart_Summarization")
model = AutoModelForSeq2SeqLM.from_pretrained("Chillyblast/Bart_Summarization")

# Create a pipeline for text summarization
summarizer = pipeline("summarization", model=model, tokenizer=tokenizer)

# Example input for inference
dialogue = input("Enter the input: ")

# Perform inference
summary = summarizer(dialogue, max_length=500, min_length=300, do_sample=False)

# Print the summary
print("Summary:", summary[0]["summary_text"])
```
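
For finer control over generation (beam width, length limits, truncation of long inputs), the pipeline call can be replaced with an explicit tokenize → generate → decode sequence. The sketch below assumes the same checkpoint; the example dialogue and the generation parameters are illustrative, not values prescribed by this model.

```python
# Minimal sketch: explicit tokenize -> generate -> decode with the same checkpoint.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("Chillyblast/Bart_Summarization")
model = AutoModelForSeq2SeqLM.from_pretrained("Chillyblast/Bart_Summarization")

# Illustrative input; replace with your own dialogue or document.
dialogue = "Anna: Are we still meeting at 6? Ben: Yes, see you at the cafe."

# Truncate to BART's maximum input length and return PyTorch tensors.
inputs = tokenizer(dialogue, return_tensors="pt", truncation=True, max_length=1024)

# Beam search without sampling, mirroring do_sample=False in the pipeline call;
# length limits here are illustrative and can be tuned to your inputs.
output_ids = model.generate(
    **inputs,
    max_length=150,
    min_length=30,
    num_beams=4,
    early_stopping=True,
)

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```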