Update app.py
app.py
CHANGED
@@ -28,11 +28,6 @@ input_text = st.text_input("Testo:", value="Lorem ipsum dolor sit amet, [MASK] a
 modelname = "./models/bert-base-latin-uncased"
 
 
-
-#tokenizer_roberta = AutoTokenizer.from_pretrained("pstroe/roberta-base-latin-cased3")
-#model_roberta = AutoModelForMaskedLM.from_pretrained("pstroe/roberta-base-latin-cased3")
-#fill_mask_roberta = pipeline("fill-mask", model=model_roberta, tokenizer=tokenizer_roberta)
-
 tokenizer_robertaclasscat = AutoTokenizer.from_pretrained("ClassCat/roberta-base-latin-v2")
 model_robertaclasscat = AutoModelForMaskedLM.from_pretrained("ClassCat/roberta-base-latin-v2")
 fill_mask_robertaclasscat = pipeline("fill-mask", model=model_robertaclasscat, tokenizer=tokenizer_robertaclasscat)
@@ -41,9 +36,7 @@ tokenizer = AutoTokenizer.from_pretrained(modelname)
 model = AutoModelForMaskedLM.from_pretrained(modelname)
 fill_mask = pipeline("fill-mask", model=model, tokenizer=tokenizer)
 
-
-#model_lv = AutoModelForMaskedLM.from_pretrained(modelname_lv)
-#fill_mask_lv = pipeline("fill-mask", model=model_lv, tokenizer=tokenizer_lv)
+
 
 if input_text:
     predictions = fill_mask(input_text)
@@ -51,10 +44,6 @@ if input_text:
     for pred in predictions:
         st.write(f"**Parola**: {pred['token_str']}, **Probabilità**: {pred['score']:.4f}, **Sequence**: {pred['sequence']}")
     input_text_roberta = input_text.replace("[MASK]", "<mask>")
-    #predictions_roberta = fill_mask_roberta(input_text_roberta)
-    #st.subheader("Risultati delle previsioni con Roberta Base Latin Cased 3:")
-    #for pred_roberta in predictions_roberta:
-    #    st.write(f"**Parola**: {pred_roberta['token_str']}, **Probabilità**: {pred_roberta['score']:.4f}, **Sequence**: {pred_roberta['sequence']}")
     predictions_robertaclasscat = fill_mask_robertaclasscat(input_text_roberta)
     st.subheader("Risultati delle previsioni con Roberta:")
     for pred_robertaclasscat in predictions_robertaclasscat:
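For reference, below is a minimal sketch of app.py as it stands after this commit, assembled from the context and unchanged lines shown in the diff. The model path "./models/bert-base-latin-uncased" and the Hub model "ClassCat/roberta-base-latin-v2" are taken verbatim from the diff; the full default value of the text input is truncated in the hunk header, and the body of the final output loop is not visible in the diff, so those parts are assumptions mirroring the surrounding code.

# Minimal sketch of app.py after this commit; assumes streamlit and transformers are
# installed and that a BERT checkpoint exists at ./models/bert-base-latin-uncased.
import streamlit as st
from transformers import AutoTokenizer, AutoModelForMaskedLM, pipeline

# The original default value is truncated in the hunk header; this completion is an assumption.
input_text = st.text_input("Testo:", value="Lorem ipsum dolor sit amet, [MASK] adipiscing elit")

# Local BERT-style checkpoint (expects the [MASK] placeholder).
modelname = "./models/bert-base-latin-uncased"
tokenizer = AutoTokenizer.from_pretrained(modelname)
model = AutoModelForMaskedLM.from_pretrained(modelname)
fill_mask = pipeline("fill-mask", model=model, tokenizer=tokenizer)

# RoBERTa-style checkpoint from the Hub (its tokenizer uses <mask> rather than [MASK]).
tokenizer_robertaclasscat = AutoTokenizer.from_pretrained("ClassCat/roberta-base-latin-v2")
model_robertaclasscat = AutoModelForMaskedLM.from_pretrained("ClassCat/roberta-base-latin-v2")
fill_mask_robertaclasscat = pipeline("fill-mask", model=model_robertaclasscat, tokenizer=tokenizer_robertaclasscat)

if input_text:
    # BERT predictions on the raw input.
    predictions = fill_mask(input_text)
    for pred in predictions:
        st.write(f"**Parola**: {pred['token_str']}, **Probabilità**: {pred['score']:.4f}, **Sequence**: {pred['sequence']}")

    # RoBERTa tokenizers expect <mask>, so rewrite the placeholder before querying.
    input_text_roberta = input_text.replace("[MASK]", "<mask>")
    predictions_robertaclasscat = fill_mask_robertaclasscat(input_text_roberta)
    st.subheader("Risultati delle previsioni con Roberta:")
    for pred_robertaclasscat in predictions_robertaclasscat:
        # Loop body not shown in the diff; mirrored from the BERT loop above.
        st.write(f"**Parola**: {pred_robertaclasscat['token_str']}, **Probabilità**: {pred_robertaclasscat['score']:.4f}, **Sequence**: {pred_robertaclasscat['sequence']}")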