# netherator / app.py
import json
import os
import pprint
import time
from random import randint
import psutil
import streamlit as st
import torch
from transformers import (AutoModelForCausalLM, AutoTokenizer, pipeline,
set_seed)
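
# Pick the last CUDA device if one is available; -1 means "run on CPU" for the
# transformers pipeline below.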
device = torch.cuda.device_count() - 1
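

# Cache the tokenizer and model across Streamlit reruns so the weights are
# downloaded and loaded only once per process.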
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def load_model(model_name):
os.environ["TOKENIZERS_PARALLELISM"] = "false"
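    # Prefer a token from .streamlit/secrets.toml; fall back to the
    # HF_ACCESS_TOKEN environment variable when no secrets file is present.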
try:
if not os.path.exists(".streamlit/secrets.toml"):
raise FileNotFoundError
access_token = st.secrets.get("netherator")
except FileNotFoundError:
access_token = os.environ.get("HF_ACCESS_TOKEN", None)
tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=access_token)
model = AutoModelForCausalLM.from_pretrained(
model_name, use_auth_token=access_token
)
if device != -1:
model.to(f"cuda:{device}")
return tokenizer, model
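

# Lazily loads a model on first use and wraps it in a text-generation pipeline.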
class StoryGenerator:
def __init__(self, model_name):
self.model_name = model_name
self.tokenizer = None
self.model = None
self.generator = None
self.model_loaded = False
def load(self):
if not self.model_loaded:
self.tokenizer, self.model = load_model(self.model_name)
self.generator = pipeline(
"text-generation",
model=self.model,
tokenizer=self.tokenizer,
device=device,
)
self.model_loaded = True
    def get_text(self, text: str, **generate_kwargs) -> list:
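        # Illustrative usage (hypothetical prompt): get_text("Er was eens", max_length=50, do_sample=True)
        # returns a list of dicts like [{"generated_text": "Er was eens ..."}].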
return self.generator(text, **generate_kwargs)
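

# Selectable models; their StoryGenerator instances are created in instantiate_models().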
STORY_GENERATORS = [
{
"model_name": "yhavinga/gpt-neo-125M-dutch-nedd",
"desc": "Dutch GPTNeo Small",
"story_generator": None,
},
{
"model_name": "yhavinga/gpt2-medium-dutch-nedd",
"desc": "Dutch GPT2 Medium",
"story_generator": None,
},
# {
# "model_name": "yhavinga/gpt-neo-125M-dutch",
# "desc": "Dutch GPTNeo Small",
# "story_generator": None,
# },
# {
# "model_name": "yhavinga/gpt2-medium-dutch",
# "desc": "Dutch GPT2 Medium",
# "story_generator": None,
# },
]
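

# Build a StoryGenerator for every registered model and load it up front,
# showing a spinner per model while the weights are loaded.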
def instantiate_models():
for sg in STORY_GENERATORS:
sg["story_generator"] = StoryGenerator(sg["model_name"])
with st.spinner(text=f"Loading the model {sg['desc']} ..."):
sg["story_generator"].load()
def set_new_seed():
seed = randint(0, 2 ** 32 - 1)
set_seed(seed)
return seed
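

# Streamlit entry point: configures the page, builds the sidebar controls and
# runs text generation when the user presses Run.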
def main():
st.set_page_config( # Alternate names: setup_page, page, layout
        page_title="Netherator",  # String or None. Strings get appended with "• Streamlit".
layout="wide", # Can be "centered" or "wide". In the future also "dashboard", etc.
initial_sidebar_state="expanded", # Can be "auto", "expanded", "collapsed"
page_icon="πŸ“š", # String, anything supported by st.image, or None.
)
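    # Instantiate and load all models (cached), then inject the custom stylesheet.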
instantiate_models()
with open("style.css") as f:
st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
st.sidebar.image("demon-reading-Stewart-Orr.png", width=200)
st.sidebar.markdown(
"""# Netherator
Teller of tales from the Netherlands"""
)
model_desc = st.sidebar.selectbox(
"Model", [sg["desc"] for sg in STORY_GENERATORS], index=1
)
st.sidebar.title("Parameters:")
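    # Default prompt ("Het was een koude winterdag" ~ "It was a cold winter day"),
    # kept in session state so it survives reruns.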
if "prompt_box" not in st.session_state:
st.session_state["prompt_box"] = "Het was een koude winterdag"
st.session_state["text"] = st.text_area("Enter text", st.session_state.prompt_box)
# min_length = st.sidebar.number_input(
# "Min length", min_value=10, max_value=150, value=75
# )
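    # "Lengte van de tekst" = "Length of the text": the maximum length of the
    # generated sequence, in tokens.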
max_length = st.sidebar.number_input(
"Lengte van de tekst",
value=300,
max_value=512,
)
no_repeat_ngram_size = st.sidebar.number_input(
"No-repeat NGram size", min_value=1, max_value=5, value=3
)
repetition_penalty = st.sidebar.number_input(
"Repetition penalty", min_value=0.0, max_value=5.0, value=1.2, step=0.1
)
num_return_sequences = st.sidebar.number_input(
"Num return sequences", min_value=1, max_value=5, value=1
)
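    # Two decoding modes: top-k/top-p sampling (stochastic) or beam search
    # (deterministic, with a length penalty).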
if sampling_mode := st.sidebar.selectbox(
"select a Mode", index=0, options=["Top-k Sampling", "Beam Search"]
):
if sampling_mode == "Beam Search":
num_beams = st.sidebar.number_input(
"Num beams", min_value=1, max_value=10, value=4
)
length_penalty = st.sidebar.number_input(
"Length penalty", min_value=0.0, max_value=5.0, value=1.5, step=0.1
)
params = {
"max_length": max_length,
"no_repeat_ngram_size": no_repeat_ngram_size,
"repetition_penalty": repetition_penalty,
"num_return_sequences": num_return_sequences,
"num_beams": num_beams,
"early_stopping": True,
"length_penalty": length_penalty,
}
else:
top_k = st.sidebar.number_input(
"Top K", min_value=0, max_value=100, value=50
)
top_p = st.sidebar.number_input(
"Top P", min_value=0.0, max_value=1.0, value=0.95, step=0.05
)
temperature = st.sidebar.number_input(
"Temperature", min_value=0.05, max_value=1.0, value=0.8, step=0.05
)
params = {
"max_length": max_length,
"no_repeat_ngram_size": no_repeat_ngram_size,
"repetition_penalty": repetition_penalty,
"num_return_sequences": num_return_sequences,
"do_sample": True,
"top_k": top_k,
"top_p": top_p,
"temperature": temperature,
}
st.sidebar.markdown(
"""For an explanation of the parameters, head over to the [Huggingface blog post about text generation](https://huggingface.co/blog/how-to-generate)
and the [Huggingface text generation interface doc](https://huggingface.co/transformers/main_classes/model.html?highlight=generate#transformers.generation_utils.GenerationMixin.generate).
"""
)
if st.button("Run"):
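        # Rough wall-clock estimate in seconds, scaled by output length, device,
        # number of returned sequences and (for beam search) number of beams.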
estimate = max_length / 18
if device == -1:
## cpu
estimate = estimate * (1 + 0.7 * (num_return_sequences - 1))
if sampling_mode == "Beam Search":
estimate = estimate * (1.1 + 0.3 * (num_beams - 1))
else:
## gpu
estimate = estimate * (1 + 0.1 * (num_return_sequences - 1))
estimate = 0.5 + estimate / 5
if sampling_mode == "Beam Search":
estimate = estimate * (1.0 + 0.1 * (num_beams - 1))
estimate = int(estimate)
with st.spinner(
text=f"Please wait ~ {estimate} second{'s' if estimate != 1 else ''} while getting results ..."
):
memory = psutil.virtual_memory()
story_generator = next(
(
x["story_generator"]
for x in STORY_GENERATORS
if x["desc"] == model_desc
),
None,
)
seed = set_new_seed()
time_start = time.time()
result = story_generator.get_text(text=st.session_state.text, **params)
time_end = time.time()
time_diff = time_end - time_start
st.subheader("Result")
for text in result:
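                # Each pipeline result is a dict carrying the text under "generated_text".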
st.write(text.get("generated_text").replace("\n", " \n"))
# st.text("*Translation*")
# translation = translate(result, "en", "nl")
# st.write(translation.replace("\n", " \n"))
#
info = f"""
---
*Memory: {memory.total / (1024 * 1024 * 1024):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 * 1024 * 1024):.2f}GB*
*Text generated using seed {seed} in {time_diff:.5} seconds*
"""
st.write(info)
params["seed"] = seed
params["prompt"] = st.session_state.text
params["model"] = story_generator.model_name
params_text = json.dumps(params)
print(params_text)
st.json(params_text)
if __name__ == "__main__":
main()