import gradio as gr
import torch
from transformers import BertForMaskedLM, BertTokenizer
import asyncio

# Load the model and tokenizer; force_download=True re-fetches the files instead of reusing the local cache
model_name = "bert-base-uncased"
model = BertForMaskedLM.from_pretrained(model_name, force_download=True)
tokenizer = BertTokenizer.from_pretrained(model_name, force_download=True)

# Define the inference function
def inference(input_text):
    if "[MASK]" not in input_text:
        return "Error: The input text must contain the [MASK] token."
    
    # Tokenize the input and locate the [MASK] position(s)
    inputs = tokenizer(input_text, return_tensors="pt")
    mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1]

    # Run the forward pass without gradient tracking
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits

    # Find the most likely token for the first [MASK]
    mask_token_logits = logits[0, mask_token_index, :]
    top_token = torch.topk(mask_token_logits, 1, dim=1).indices[0].tolist()

    # Splice the prediction back into the text (only the first [MASK] is filled)
    predicted_token = tokenizer.decode(top_token)
    result_text = input_text.replace("[MASK]", predicted_token, 1)
    
    return result_text

# Define the Gradio interface
iface = gr.Interface(
    fn=inference,
    inputs="text",
    outputs="text",
    examples=[
        ["The capital of France is [MASK]."],
        ["The quick brown fox jumps over the [MASK] dog."]
    ]
)

# Launch the interface
if __name__ == "__main__":
    # Manually create and register an asyncio event loop (workaround for environments without a running loop)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    
    iface.launch(server_port=7862)
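
# A minimal sketch (an addition, not part of the original app) of how the same
# model and tokenizer could return the top k candidates for the first [MASK]
# instead of only the best one; predict_top_k is a hypothetical helper name.
def predict_top_k(input_text, k=5):
    if "[MASK]" not in input_text:
        return []
    inputs = tokenizer(input_text, return_tensors="pt")
    mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1]
    with torch.no_grad():
        logits = model(**inputs).logits
    # Rank the vocabulary for the first [MASK] and keep the k best token ids
    mask_token_logits = logits[0, mask_token_index, :]
    top_tokens = torch.topk(mask_token_logits, k, dim=1).indices[0].tolist()
    return [input_text.replace("[MASK]", tokenizer.decode([t]).strip(), 1) for t in top_tokens]

# Example: predict_top_k("The capital of France is [MASK].") should include
# "The capital of France is paris." (bert-base-uncased is lowercased).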


# Alternative: the transformers fill-mask pipeline wraps the tokenize,
# predict, and decode steps above in a single call.
from transformers import pipeline
unmasker = pipeline('fill-mask', model='bert-base-uncased')
unmasker("Hello I'm a [MASK] model.")

# Extracting raw BERT features (hidden states) in PyTorch:
from transformers import BertTokenizer, BertModel
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained("bert-base-uncased")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
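
# output.last_hidden_state holds one contextual embedding per token,
# shaped (batch_size, sequence_length, 768) for bert-base models.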


# The same feature extraction in TensorFlow:
from transformers import BertTokenizer, TFBertModel
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = TFBertModel.from_pretrained("bert-base-uncased")
text = "Replace me by any text you'd like."
encoded_input = tokenizer(text, return_tensors='tf')
output = model(encoded_input)