import streamlit as st
import tensorflow
from transformers import pipeline

model_checkpoint = "Modfiededition/t5-base-fine-tuned-on-jfleg"

@st.cache(allow_output_mutation=True, suppress_st_warning=True)
def load_model():
    # Cache the grammar-correction pipeline so it is only loaded once per session
    return pipeline("text2text-generation", model=model_checkpoint)

model = load_model()

# Prompts
st.title("Writing Assistant for you 🦄")
textbox = st.text_area('Write your text in this box:', '', height=200, max_chars=1000)
button = st.button('Detect grammar mistakes:')

if button:
    output_text = model(textbox)[0]["generated_text"]
    st.write("Correct : ", output_text)

# Lower-level alternative kept for reference (tokenize, run the model, decode):
#inputs = tokenizer("Grammar: "+sent, return_tensors="tf")
#output_sequences = infer(inputs)
#generated_sequences = tokenizer.decode(output_ids)
#st.write(generated_sequences)
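
# A minimal sketch of the lower-level approach the commented lines above hint at:
# load the tokenizer and TF model directly and call generate() yourself instead of
# using the pipeline. The "Grammar: " prefix is taken from the commented-out line;
# the function name and the max_length/num_beams values are assumptions, not part
# of the original app. The function is defined but not called, so app behavior is
# unchanged.
def correct_with_generate(sentence: str) -> str:
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
    seq2seq = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)

    inputs = tokenizer("Grammar: " + sentence, return_tensors="tf")
    output_ids = seq2seq.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=128,
        num_beams=4,
    )
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

# Usage (hypothetical): st.write("Correct : ", correct_with_generate(textbox))
# could replace the pipeline call above, at the cost of reloading the model
# unless it is also cached.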