# Load model directly
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("12345deena/t5-small-ilct5")
model = AutoModelForSeq2SeqLM.from_pretrained("12345deena/t5-small-ilct5")


def summarize(text):
    # Tokenize the input text with the T5 summarization prefix
    inputs = tokenizer.encode(
        "summarize: " + text,
        return_tensors="pt",
        max_length=512,
        truncation=True,
    )

    # Generate the summary with beam search
    summary_ids = model.generate(
        inputs,
        max_length=150,
        min_length=40,
        length_penalty=2.0,
        num_beams=4,
        early_stopping=True,
    )

    # Decode the generated token IDs back into text
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary


# Create the Gradio interface
iface = gr.Interface(
    fn=summarize,
    inputs="text",
    outputs="text",
    title="Abstractive Text Summarization",
    description="Enter a piece of text to summarize it.",
)

# Launch the interface on port 8888
iface.launch(server_port=8888)
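
# --- Optional: querying the running app programmatically ---
# A sketch, not part of the original script. Assuming the interface above is
# serving on port 8888, the gradio_client package can call the summarization
# endpoint from a separate Python process while the app is running. The
# endpoint name ("/predict") is the gr.Interface default but may differ across
# Gradio versions; client.view_api() lists the exact name. Kept commented out
# here because iface.launch() above blocks the main thread of this script.
#
# from gradio_client import Client
#
# client = Client("http://127.0.0.1:8888/")
# result = client.predict(
#     "Paste a long article or paragraph here to receive a short summary.",
#     api_name="/predict",
# )
# print(result)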