File size: 1,509 Bytes
d9b58aa
 
 
ee23318
d9b58aa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ee23318
 
 
 
 
 
d9b58aa
 
 
 
 
 
ee23318
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
import streamlit as st
from transformers import PegasusForConditionalGeneration, PegasusTokenizer

st.title("Paraphrase Generator")

# Fine-tuned PEGASUS checkpoint for paraphrasing; inference runs on CPU.
model_name = "tuner007/pegasus_paraphrase"
torch_device = "cpu"
# The tokenizer is cheap to build, so it is created eagerly at import time;
# the (much heavier) model is loaded lazily and cached by load_model() below.
tokenizer = PegasusTokenizer.from_pretrained(model_name)


# st.cache(allow_output_mutation=True) was deprecated in Streamlit 1.18 and
# removed in 1.30, so the original decorator crashes on current Streamlit.
# Prefer st.cache_resource (designed for unserializable shared resources such
# as models); fall back to the legacy API on old Streamlit versions.
try:
    _model_cache = st.cache_resource  # Streamlit >= 1.18
except AttributeError:  # legacy Streamlit without cache_resource
    _model_cache = st.cache(allow_output_mutation=True)


@_model_cache
def load_model():
    """Load the PEGASUS paraphrase model once and cache it across reruns.

    Returns:
        PegasusForConditionalGeneration: model moved to ``torch_device``.
    """
    model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)
    return model


def get_response(
    input_text, num_return_sequences, num_beams, max_length=60, temperature=1.5
):
    """Generate paraphrases of *input_text* with beam search.

    Args:
        input_text: Sentence to paraphrase.
        num_return_sequences: Number of paraphrases to return
            (must not exceed ``num_beams``).
        num_beams: Beam width used during generation.
        max_length: Token cap applied to both the encoded input and the output.
        temperature: Forwarded to ``model.generate``.

    Returns:
        list[str]: Decoded paraphrases with special tokens stripped.
    """
    model = load_model()
    encoded = tokenizer(
        [input_text],
        truncation=True,
        padding="longest",
        max_length=max_length,
        return_tensors="pt",
    )
    encoded = encoded.to(torch_device)
    generated_ids = model.generate(
        **encoded,
        max_length=max_length,
        num_beams=num_beams,
        num_return_sequences=num_return_sequences,
        temperature=temperature
    )
    return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)


# --- UI: input box plus sidebar controls, then show the paraphrases. ---
sentence = st.text_input(
    label="Enter a sentence to paraphrase", value="How do I make a deposit?"
)

n_paraphrases = st.sidebar.slider("Number of paraphrases", 1, 20, 10, 1)
sampling_temperature = st.sidebar.slider("Temperature", 0.1, 5.0, 1.5, 0.1)
output_max_length = st.sidebar.slider("Max length", 10, 100, 60, 10)

if sentence:
    # Beam width mirrors the requested count, since num_return_sequences
    # must not exceed num_beams.
    paraphrases = get_response(
        sentence, n_paraphrases, n_paraphrases, output_max_length, sampling_temperature
    )

    st.write(paraphrases)