File size: 1,221 Bytes
1e434e8
 
 
 
 
8ddfa6e
1e434e8
 
 
 
 
 
 
 
97842c5
961346c
 
8ddfa6e
 
1e434e8
 
 
 
 
 
 
 
961346c
 
 
1e434e8
 
 
8fa0f4a
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
from distutils.command.upload import upload
import pandas as pd
import streamlit as st

from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering
from transformers import pipeline


@st.cache  # NOTE(review): st.cache is deprecated in newer Streamlit — migrate to st.cache_data when upgrading
def load_data(file, nrows=50):
    """Read an uploaded CSV into a DataFrame, capped at *nrows* rows.

    Parameters
    ----------
    file : file-like object
        The uploaded CSV (e.g. from ``st.file_uploader``).
    nrows : int, optional
        Maximum number of rows to read (default 50, matching the
        original hard-coded cap; kept small so the demo stays fast).

    Returns
    -------
    pandas.DataFrame
    """
    df = pd.read_csv(file, encoding='utf-8', nrows=nrows)
    return df


# Intentionally NOT wrapped in @st.cache: the tokenizer object is not
# hashable, so Streamlit's cache cannot handle it.
def load_pipeline(model_cp, tokenizer_cp):
    """Construct a Hugging Face question-answering pipeline.

    Parameters
    ----------
    model_cp : str
        Model checkpoint name or path.
    tokenizer_cp : str
        Tokenizer checkpoint name or path.

    Returns
    -------
    transformers.Pipeline
        A ready-to-call "question-answering" pipeline.
    """
    qa_pipeline = pipeline("question-answering", model=model_cp, tokenizer=tokenizer_cp)
    return qa_pipeline


# Page config
title = "Recipe Improver"
icon = "🍣"
st.set_page_config(page_title=title, page_icon=icon)
st.title(title)


# Load tokenizer and model
model_cp = "aidan-o-brien/recipe-improver"
tokenizer_cp = "albert-base-v2"
question_answer = load_pipeline(model_cp, tokenizer_cp)
st.write("Model and tokenizer successfully loaded.")


# Load csv
uploaded_file = st.file_uploader("Choose a csv file", type="csv", key='file_uploader')
if uploaded_file is not None:
    df = load_data(uploaded_file)
    st.write(df.head())

    # Guard: without a non-empty 'review' column the inference below
    # would raise an unhandled KeyError/IndexError in the app.
    if 'review' not in df.columns or df['review'].empty:
        st.error("The uploaded csv must contain a non-empty 'review' column.")
    else:
        # Run inference on first example.
        # .iloc[0] selects by position; the original df['review'][0]
        # looked up *label* 0 and breaks on CSVs whose index is not the
        # default 0-based RangeIndex.
        first_example = df['review'].iloc[0]
        question = "how to improve this recipe?"
        answer = question_answer(question=question, context=first_example)

        # Present results
        st.write(answer)