File size: 1,182 Bytes
0fdee06
 
82cdd19
 
 
0fdee06
 
 
 
82cdd19
 
 
 
 
 
 
 
 
 
 
 
 
0fdee06
 
 
82cdd19
 
 
 
0fdee06
 
 
 
82cdd19
 
 
4c4ed41
82cdd19
 
4115668
82cdd19
4115668
82cdd19
 
 
 
0fdee06
 
82cdd19
 
 
0fdee06
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import json

import streamlit as st
import tokenizers
import torch
from transformers import Pipeline, pipeline

from utils import get_answer


@st.cache(
    hash_funcs={
        torch.nn.parameter.Parameter: lambda _: None,
        tokenizers.Tokenizer: lambda _: None,
        tokenizers.AddedToken: lambda _: None,
    },
    allow_output_mutation=True,
    show_spinner=False,
)
def load_engine() -> Pipeline:
    """Build and cache the Italian question-answering pipeline.

    Torch parameters and tokenizer objects are excluded from Streamlit's
    cache hashing (their hash_funcs return None), since hashing them is
    either unsupported or prohibitively slow.

    Returns:
        A transformers question-answering Pipeline ready for inference.
    """
    # Model and tokenizer come from the same fine-tuned checkpoint.
    checkpoint = "mrm8488/bert-italian-finedtuned-squadv1-it-alfa"
    return pipeline(
        "question-answering",
        model=checkpoint,
        tokenizer=checkpoint,
    )


# Load the QA engine once up front, with a spinner while the model initializes.
with st.spinner(
    text="Sto preparando il necessario per rispondere alle tue domande personali..."
):
    engine = load_engine()

# Context passages searched for answers; get_answer reads the "info" key.
# Explicit encoding so the JSON decodes the same regardless of locale.
with open("context.json", encoding="utf-8") as f:
    context = json.load(f)

st.title("Le risposte alle tue domande personali")

# Named "question" rather than "input" to avoid shadowing the builtin.
question = st.text_input("Scrivi una domanda e comparirà la risposta!")

if question:
    try:
        answer = get_answer(question, context["info"], engine)
        st.subheader(answer)
    except Exception:
        # except Exception (not a bare except) so KeyboardInterrupt and
        # SystemExit still propagate; any inference/lookup failure is shown
        # to the user as a friendly retry message instead of a traceback.
        st.error(
            "Qualcosa é andato storto. Prova di nuovo con un'altra domanda magari!"
        )