# recipe-improver / app.py
# (Hugging Face file-viewer header preserved as comments:
#  author aidan-o-brien, commit 7a5750a "remove caching for choosing model",
#  raw / history / blame, 1.96 kB)
from distutils.command.upload import upload
import pandas as pd
import streamlit as st
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering
from transformers import pipeline
@st.cache
def load_data(file):
    """Read the uploaded csv into a DataFrame, keeping only the first 50 rows."""
    return pd.read_csv(file, encoding='utf-8', nrows=50)
def load_pipeline(model_cp, tokenizer_cp):
    """Build a transformers question-answering pipeline from checkpoint names."""
    qa_pipeline = pipeline("question-answering",
                           model=model_cp,
                           tokenizer=tokenizer_cp)
    return qa_pipeline
def choose_model():
    """Render a sidebar selectbox and return (model_cp, tokenizer_cp).

    The fine-tuned 'aidan-o-brien/recipe-improver' checkpoint ships without
    its own tokenizer, so it is paired with the base albert tokenizer;
    every other choice serves as its own tokenizer checkpoint.
    """
    with st.sidebar:  # streamlit doesn't know how to hash this by default?
        st.write("# Model Selection")
        model_cp = st.selectbox('Select model for inference',
                                ('deepset/roberta-base-squad2',
                                 'aidan-o-brien/recipe-improver'))
        # Guard clause: any model other than ours doubles as its tokenizer.
        if model_cp != "aidan-o-brien/recipe-improver":
            return model_cp, model_cp
        return model_cp, "albert-base-v2"
# Page config
title = "Recipe Improver"
icon = "🍣"
st.set_page_config(page_title=title, page_icon=icon)
st.title(title)

# Load model and tokenizer
model_cp, tokenizer_cp = choose_model()
question_answer = load_pipeline(model_cp, tokenizer_cp)
st.write("Model and tokenizer successfully loaded.")

# Upload csv - format with expander for aesthetics
with st.expander("Upload csv file"):
    uploaded_file = st.file_uploader("Choose a csv file", type="csv", key='file_uploader')

# If file is uploaded, run inference
if uploaded_file is not None:
    df = load_data(uploaded_file)

    # Guard: df['review'][0] raises KeyError / IndexError when the csv has
    # no 'review' column or contains no rows — report instead of crashing.
    if 'review' not in df.columns or df.empty:
        st.error("The uploaded csv must contain a non-empty 'review' column.")
    else:
        # Run inference on first example only
        first_example = df['review'][0]
        question = "how to improve this recipe?"
        answer = question_answer(question=question, context=first_example)

        # Present results (markdown lines stay at column 0 so that the
        # f-string body is not rendered as an indented code block)
        st.markdown(f"""
# Results
The review provided was:
{first_example}
The answer given to the question 'how to improve this recipe?' was:
{answer['answer']}
""")