Update app.py
app.py CHANGED
@@ -11,7 +11,7 @@ tokenizer = BertTokenizer.from_pretrained(model_name, force_download=True)
 def inference(input_text):
     if "[MASK]" not in input_text:
         return "Error: The input text must contain the [MASK] token."
-
+
     # Tokenization
     inputs = tokenizer(input_text, return_tensors="pt")
     mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1]
@@ -28,11 +28,21 @@ def inference(input_text):
     # Insert the prediction into the text
     predicted_token = tokenizer.decode(top_token)
     result_text = input_text.replace("[MASK]", predicted_token, 1)
-
+
     return result_text
 
+
 # Define the Gradio interface
-iface = gr.Interface(
+iface = gr.Interface(
     fn=inference,
+    inputs="text",
+    outputs="text",
+    examples=[
+        ["The capital of France is [MASK]."],
+        ["The quick brown fox jumps over the [MASK] dog."],
+    ],
+)
 
-
+# Launch the interface
+if __name__ == "__main__":
+    iface.launch(server_port=7862)
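For context, here is a minimal sketch of how the pieces of app.py fit together after this commit. Only the lines visible in the diff above come from the commit itself; the model_name value, the use of BertForMaskedLM, and the body of the prediction step (lines 18-27 are not shown in the diff) are assumptions about how top_token is obtained.

import torch
import gradio as gr
from transformers import BertTokenizer, BertForMaskedLM

# Assumption: the Space uses a standard BERT masked-LM checkpoint.
model_name = "bert-base-uncased"
tokenizer = BertTokenizer.from_pretrained(model_name, force_download=True)
model = BertForMaskedLM.from_pretrained(model_name)

def inference(input_text):
    if "[MASK]" not in input_text:
        return "Error: The input text must contain the [MASK] token."

    # Tokenization
    inputs = tokenizer(input_text, return_tensors="pt")
    mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1]

    # Assumption (elided in the diff): run the model and take the most likely
    # token at the first [MASK] position.
    with torch.no_grad():
        logits = model(**inputs).logits
    top_token = logits[0, mask_token_index[0]].argmax(dim=-1)

    # Insert the prediction into the text
    predicted_token = tokenizer.decode(top_token)
    result_text = input_text.replace("[MASK]", predicted_token, 1)

    return result_text

# Define the Gradio interface
iface = gr.Interface(
    fn=inference,
    inputs="text",
    outputs="text",
    examples=[
        ["The capital of France is [MASK]."],
        ["The quick brown fox jumps over the [MASK] dog."],
    ],
)

# Launch the interface
if __name__ == "__main__":
    iface.launch(server_port=7862)

Run locally with python app.py; the interface should come up on http://localhost:7862, with the two example prompts offered as clickable presets below the input box.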