import streamlit as st
from transformers import T5Tokenizer, AutoModelForSeq2SeqLM
# Load the Hugging Face model with SentencePiece tokenizer
@st.cache_resource
def load_model():
    tokenizer = T5Tokenizer.from_pretrained("Vamsi/T5_Paraphrase_Paws")
    model = AutoModelForSeq2SeqLM.from_pretrained("Vamsi/T5_Paraphrase_Paws")
    return tokenizer, model
# Load the model and tokenizer
tokenizer, model = load_model()
# Streamlit app interface
st.title("Paraphrasing Tool - AI to Human")
st.write("Paste your AI-generated text below, and the tool will humanize it:")
# Input text box
input_text = st.text_area("Enter text here (very long text is truncated to the model's 512-token limit):")
if st.button("Paraphrase"):
    if input_text.strip():
        with st.spinner("Paraphrasing... Please wait."):
            try:
                # Prepare input for the model; T5 accepts at most 512 input
                # tokens, so longer text is truncated instead of raising.
                inputs = tokenizer.encode(
                    "paraphrase: " + input_text,
                    return_tensors="pt",
                    truncation=True,
                    max_length=512,
                )
                # Generate paraphrased output. do_sample=True lets the
                # temperature take effect (it is ignored by plain beam
                # search), and max_length caps the output so the library
                # default of 20 tokens doesn't cut paraphrases short.
                outputs = model.generate(
                    inputs,
                    max_length=256,
                    num_beams=5,
                    do_sample=True,
                    temperature=0.7,
                    early_stopping=True,
                )
                paraphrased_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
                st.success("Here is the paraphrased text:")
                st.write(paraphrased_text)
            except Exception as e:
                st.error(f"An error occurred: {e}")
    else:
        st.error("Please enter some text to paraphrase.")