import streamlit as st
from transformers import PegasusForConditionalGeneration, PegasusTokenizer

st.title("Paraphrase Generator")

model_name = "tuner007/pegasus_paraphrase"
torch_device = "cpu"
tokenizer = PegasusTokenizer.from_pretrained(model_name)


# Cache the model so it is loaded only once per session, not on every rerun.
# (On newer Streamlit versions, st.cache is deprecated in favor of st.cache_resource.)
@st.cache(allow_output_mutation=True)
def load_model():
    model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)
    return model


def get_response(input_text, num_return_sequences, num_beams, max_length=60, temperature=1.5):
    model = load_model()
    # Tokenize the input sentence as a batch of size 1.
    batch = tokenizer(
        [input_text],
        truncation=True,
        padding="longest",
        max_length=max_length,
        return_tensors="pt",
    ).to(torch_device)
    # Generate paraphrases with beam search. Note: in recent transformers
    # versions, temperature only takes effect when do_sample=True.
    translated = model.generate(
        **batch,
        max_length=max_length,
        num_beams=num_beams,
        num_return_sequences=num_return_sequences,
        temperature=temperature,
    )
    tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
    return tgt_text


context = st.text_input(
    label="Enter a sentence to paraphrase", value="How do I make a deposit?"
)
num_return_sequences = st.sidebar.slider("Number of paraphrases", 1, 20, 10, 1)
# num_return_sequences must not exceed num_beams, so tie them together.
num_beams = num_return_sequences
temperature = st.sidebar.slider("Temperature", 0.1, 5.0, 1.5, 0.1)
max_length = st.sidebar.slider("Max length", 10, 100, 60, 10)

if context:
    response = get_response(context, num_return_sequences, num_beams, max_length, temperature)
    st.write(response)
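
# To try it out (a minimal sketch; assumes this file is saved as app.py and
# that streamlit, transformers, torch, and sentencepiece are installed --
# the Pegasus tokenizer depends on sentencepiece):
#
#   streamlit run app.py
#
# Streamlit serves the app at http://localhost:8501 by default.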