import pandas as pd
import streamlit as st

from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering


@st.cache
def load_data(file):
    # Only read the first 50 rows of the uploaded csv
    df = pd.read_csv(file, encoding='utf-8', nrows=50)
    return df


# allow_output_mutation stops Streamlit from trying to hash the model and
# tokenizer objects (which are not hashable) on every rerun
@st.cache(allow_output_mutation=True)
def load_model_tokenizer():
    tokenizer_cp = "albert-base-v2"
    model_cp = "aidan-o-brien/recipe-improver"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_cp)
    model = TFAutoModelForQuestionAnswering.from_pretrained(model_cp)
    return model, tokenizer


# Page config
title = "Recipe Improver"
icon = "🍣"
st.set_page_config(page_title=title, page_icon=icon)
st.title(title)

# Load csv
uploaded_file = st.file_uploader("Choose a csv file", type="csv", key='file_uploader')
if uploaded_file is not None:
    df = load_data(uploaded_file)
    st.write(df.head())

# Load tokenizer and model
model, tokenizer = load_model_tokenizer()
st.write("Model and tokenizer successfully loaded.")

# Pre-process data from csv file
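# A minimal sketch of this step, not the original implementation. It assumes
# the uploaded csv has a free-text "review" column holding the recipe
# reviews; the column name and the question wording are illustrative
# assumptions.
def preprocess(df, review_col="review"):
    question = "how was the recipe improved?"
    contexts = df[review_col].astype(str).tolist()
    questions = [question] * len(contexts)
    return questions, contexts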


# Run inference
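# A minimal sketch of the inference step: tokenize each (question, context)
# pair, run the TF question-answering model and keep the most likely start
# and end token positions. Batching all rows in one call is a simplification.
def run_inference(questions, contexts):
    inputs = tokenizer(questions, contexts,
                       padding=True, truncation=True, return_tensors="tf")
    outputs = model(inputs)
    start_idxs = outputs.start_logits.numpy().argmax(axis=-1)
    end_idxs = outputs.end_logits.numpy().argmax(axis=-1)
    return inputs["input_ids"], start_idxs, end_idxs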


# Post-process output of model
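# A minimal sketch of the post-processing step: decode each predicted answer
# span back to text. Decoding token ids directly (rather than mapping offsets
# back into the original review) is a simplification.
def postprocess(input_ids, start_idxs, end_idxs):
    answers = []
    for ids, start, end in zip(input_ids, start_idxs, end_idxs):
        span = ids[start:end + 1] if end >= start else []
        answers.append(tokenizer.decode(span, skip_special_tokens=True))
    return answers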


# Present results
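# A minimal sketch tying the steps above together. The guard is needed
# because df only exists once a csv has been uploaded; showing the results
# as a two-column table is an assumption about the intended output.
if uploaded_file is not None:
    questions, contexts = preprocess(df)
    input_ids, start_idxs, end_idxs = run_inference(questions, contexts)
    answers = postprocess(input_ids, start_idxs, end_idxs)
    results = pd.DataFrame({"review": contexts,
                            "suggested improvement": answers})
    st.dataframe(results)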