Cicciokr committed on
Commit
7888f10
·
verified ·
1 Parent(s): 2a9ceab

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -4
app.py CHANGED
@@ -11,10 +11,13 @@ st.write("Inserisci un testo con il token [MASK] per vedere le previsioni del mo
11
 
12
 
13
  st.write("Esempi di testo:");
14
- st.write("duces et reges carthaginiensivm hanno et mago qui [MASK] punico bello cornelium consulem aput liparas ceperunt");
15
- st.write("hanno et mago qui [MASK]  punico bello cornelium consulem aput liparas ceperunt");
16
- #duces et reges carthaginiensivm hanno et mago qui [MASK] punico bello cornelium consulem aput liparas ceperunt
17
- #hanno et mago qui [MASK]  punico bello cornelium consulem aput liparas ceperunt
 
 
 
18
  input_text = st.text_input("Testo:", value="Lorem ipsum dolor sit amet, [MASK] adipiscing elit.")
19
 
20
  # Model based on BERT
@@ -30,6 +33,10 @@ tokenizer_roberta = AutoTokenizer.from_pretrained("pstroe/roberta-base-latin-cas
30
  model_roberta = AutoModelForMaskedLM.from_pretrained("pstroe/roberta-base-latin-cased3")
31
  fill_mask_roberta = pipeline("fill-mask", model=model_roberta, tokenizer=tokenizer_roberta)
32
 
 
 
 
 
33
  tokenizer = AutoTokenizer.from_pretrained(modelname)
34
  model = AutoModelForMaskedLM.from_pretrained(modelname)
35
  fill_mask = pipeline("fill-mask", model=model, tokenizer=tokenizer)
@@ -52,5 +59,9 @@ if input_text:
52
  st.subheader("Risultati delle previsioni con Roberta Base Latin Cased 3:")
53
  for pred_roberta in predictions_roberta:
54
  st.write(f"**Parola**: {pred_roberta['token_str']}, **Probabilità**: {pred_roberta['score']:.4f}, **Sequence**: {pred_roberta['sequence']}")
 
 
 
 
55
 
56
 
 
11
 
12
 
13
  st.write("Esempi di testo:");
14
+ st.write("Asdrubal, frater Annibalis, qui secundo Punico bello [MASK] ingentibus copiis ab Hispania veniens => cum");
15
+ st.write("hanno et mago qui [MASK]  punico bello cornelium consulem aput liparas ceperunt => primo");
16
+ st.write("Lorem ipsum dolor sit amet, [MASK] adipiscing elit. => consectetur");
17
+ st.write("Populus Romanus cum Macedonibus [MASK] ter gessit => bellum");
18
+ #Asdrubal, frater Annibalis, qui secundo Punico bello [MASK] ingentibus copiis ab Hispania veniens => cum
19
+ #hanno et mago qui [MASK]  punico bello cornelium consulem aput liparas ceperunt => primo
20
+ #Lorem ipsum dolor sit amet, [MASK] adipiscing elit. => consectetur
21
  input_text = st.text_input("Testo:", value="Lorem ipsum dolor sit amet, [MASK] adipiscing elit.")
22
 
23
  # Model based on BERT
 
33
  model_roberta = AutoModelForMaskedLM.from_pretrained("pstroe/roberta-base-latin-cased3")
34
  fill_mask_roberta = pipeline("fill-mask", model=model_roberta, tokenizer=tokenizer_roberta)
35
 
36
+ tokenizer_robertaclasscat = AutoTokenizer.from_pretrained("ClassCat/roberta-base-latin-v2")
37
+ model_robertaclasscat = AutoModelForMaskedLM.from_pretrained("ClassCat/roberta-base-latin-v2")
38
+ fill_mask_robertaclasscat = pipeline("fill-mask", model=model_robertaclasscat, tokenizer=tokenizer_robertaclasscat)
39
+
40
  tokenizer = AutoTokenizer.from_pretrained(modelname)
41
  model = AutoModelForMaskedLM.from_pretrained(modelname)
42
  fill_mask = pipeline("fill-mask", model=model, tokenizer=tokenizer)
 
59
  st.subheader("Risultati delle previsioni con Roberta Base Latin Cased 3:")
60
  for pred_roberta in predictions_roberta:
61
  st.write(f"**Parola**: {pred_roberta['token_str']}, **Probabilità**: {pred_roberta['score']:.4f}, **Sequence**: {pred_roberta['sequence']}")
62
+ predictions_robertaclasscat = fill_mask_robertaclasscat(input_text)
63
+ st.subheader("Risultati delle previsioni con Roberta Base Latin ClassCat V2:")
64
+ for pred_robertaclasscat in predictions_robertaclasscat:
65
+ st.write(f"**Parola**: {pred_robertaclasscat['token_str']}, **Probabilità**: {pred_robertaclasscat['score']:.4f}, **Sequence**: {pred_robertaclasscat['sequence']}")
66
 
67