# --- Hugging Face Hub page residue (not code); kept as comments so the file parses ---
# JingjingZhai's picture
# Update app.py
# a083874 verified
# raw
# history blame
# 784 Bytes
from transformers import pipeline, AutoModelForMaskedLM, AutoTokenizer
import gradio as gr
# Name of the pretrained masked-language model to pull from the Hugging Face Hub.
model_name = "bert-base-uncased"  # fix: the closing quote was missing (syntax error)

# Load model and tokenizer once at import time so the pipeline is built a single time.
# NOTE(review): trust_remote_code is not needed for bert-base-uncased (it ships no
# custom code), but it is preserved here to keep the original call signature.
model = AutoModelForMaskedLM.from_pretrained(model_name, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

# Fill-mask pipeline: given text containing a [MASK] token, yields candidate fills.
nlp = pipeline("fill-mask", model=model, tokenizer=tokenizer)
def predict_masked_text(text):
    """Run the fill-mask pipeline on *text* and return the candidate sentences.

    Args:
        text: Input string containing a ``[MASK]`` token.

    Returns:
        list[str]: Completed sequences, ordered by model confidence.
    """
    # fix: the body lines below had lost their indentation (syntax error).
    results = nlp(text)
    # Each result is a dict; 'sequence' is the full sentence with the mask filled in.
    return [result['sequence'] for result in results]
# Build the Gradio UI: a two-line text input and a plain-text output box.
mask_input = gr.Textbox(lines=2, placeholder="Enter text with a [MASK] token...")
mask_output = gr.Textbox()

iface = gr.Interface(
    fn=predict_masked_text,
    inputs=mask_input,
    outputs=mask_output,
    title="Masked Language Modeling",
    description="Fill in the masked token in the input text.",
)

# Start the web server for the demo.
iface.launch()