# Author: Jingjing Zhai
# Commit: "Update app" (166b6c4, 810 bytes)
# NOTE(review): these lines were Hugging Face file-viewer chrome ("raw",
# "history blame") scraped into the source; kept as a comment header so
# the file remains valid Python.
from transformers import pipeline, AutoModelForMaskedLM, AutoTokenizer
import gradio as gr
model_name = "kuleshov-group/PlantCaduceus_l20"
model = AutoModelForMaskedLM.from_pretrained(model_name, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
nlp = pipeline("fill-mask", model=model, tokenizer=tokenizer)
def predict_masked_text(text):
results = nlp(text)
return [result['sequence'] for result in results]
# Create the Gradio interface
iface = gr.Interface(
fn=predict_masked_text,
inputs=gr.Textbox(lines=2, placeholder="Enter text with a [MASK] token..."),
outputs=gr.Textbox(),
title="Masked Language Modeling",
description="Fill in the masked token in the input text."
)
# Launch the interface
iface.launch(share=True)