import streamlit as st
from transformers import AutoModelForCausalLM, LlamaTokenizer

@st.cache_resource
def load():
    # Cache the model and tokenizer across Streamlit reruns so they are
    # downloaded and instantiated only once per process.
    model = AutoModelForCausalLM.from_pretrained(
        "stabilityai/japanese-stablelm-instruct-alpha-7b",
        trust_remote_code=True,
    )
    # The model card pairs this checkpoint with the NovelAI nerdstash
    # tokenizer, with '▁▁' registered as an extra special token.
    tokenizer = LlamaTokenizer.from_pretrained(
        "novelai/nerdstash-tokenizer-v1",
        additional_special_tokens=['▁▁'],
    )
    return model, tokenizer

def generate():
    # TODO: not implemented; one possible implementation is sketched below.
    pass

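# A minimal sketch of what the missing generation step could look like,
# assuming the "### 指示:" / "### 応答:" ("instruction" / "response") prompt
# format documented for japanese-stablelm-instruct-alpha-7b. The name
# generate_reply, its signature, and the max_new_tokens budget are
# illustrative assumptions, not part of the original app.
import torch  # needed only for this sketch's no_grad context


def generate_reply(user_input: str) -> str:
    opts = st.session_state["options"]
    prompt = "\n\n".join([
        opts["system_prompt"],
        "### 指示: \n" + user_input,
        "### 応答: \n",
    ])
    input_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
    with torch.no_grad():
        tokens = model.generate(
            input_ids.to(device=model.device),
            max_new_tokens=256,
            do_sample=True,
            temperature=opts["temperature"],
            top_k=opts["top_k"],
            top_p=opts["top_p"],
            repetition_penalty=opts["repetition_penalty"],
        )
    # Decode only the newly generated tokens, skipping the echoed prompt.
    return tokenizer.decode(tokens[0][input_ids.shape[1]:], skip_special_tokens=True)
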
st.header(":dna: 遺伝カウンセリング対話AI")  # "Genetic counseling dialogue AI"

st.sidebar.header("Options")

# Seed the generation options on first run so the widgets below can read
# their current values; without this, the first access raises a KeyError.
# The particular defaults here are assumptions, not from the original app.
if "options" not in st.session_state:
    st.session_state["options"] = {
        "temperature": 1.0,
        "top_k": 50,
        "top_p": 0.9,
        "repetition_penalty": 1.0,
        # Assumed default; the standard preamble for this instruct model.
        "system_prompt": "以下は、タスクを説明する指示です。要求を適切に満たす応答を書きなさい。",
    }

st.session_state["options"]["temperature"] = st.sidebar.slider("temperature", min_value=0.0, max_value=2.0, step=0.1, value=st.session_state["options"]["temperature"])
st.session_state["options"]["top_k"] = st.sidebar.slider("top_k", min_value=0, max_value=100, step=1, value=st.session_state["options"]["top_k"])
st.session_state["options"]["top_p"] = st.sidebar.slider("top_p", min_value=0.0, max_value=1.0, step=0.1, value=st.session_state["options"]["top_p"])
st.session_state["options"]["repetition_penalty"] = st.sidebar.slider("repetition_penalty", min_value=1.0, max_value=2.0, step=0.01, value=st.session_state["options"]["repetition_penalty"])
st.session_state["options"]["system_prompt"] = st.sidebar.text_area("System Prompt", value=st.session_state["options"]["system_prompt"])

model, tokenizer = load()
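
# A possible chat loop tying the pieces together with Streamlit's chat
# widgets; this is a sketch, and the message-history handling and the use of
# generate_reply() above are assumptions rather than part of the original app.
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Replay the conversation so far on each rerun.
for msg in st.session_state["messages"]:
    with st.chat_message(msg["role"]):
        st.write(msg["content"])

if user_input := st.chat_input("メッセージを入力してください"):  # "Enter a message"
    with st.chat_message("user"):
        st.write(user_input)
    reply = generate_reply(user_input)
    with st.chat_message("assistant"):
        st.write(reply)
    st.session_state["messages"] += [
        {"role": "user", "content": user_input},
        {"role": "assistant", "content": reply},
    ]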