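# BulgakovLM Streamlit demo: generate text with a GPT-2 checkpoint.
# Assumed invocation (the script name is a placeholder, not confirmed by the repo):
#   pip install streamlit transformers torch
#   streamlit run app.py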
import streamlit as st
from transformers import GPT2LMHeadModel, GPT2Tokenizer

@st.cache(allow_output_mutation=True)  # on newer Streamlit versions, st.cache_resource replaces st.cache
def load_model():
    '''
    Loads the model and tokenizer from the local model directory.
    :return: A (model, tokenizer) tuple.
    '''
    model_name = 'WIP'
    tokenizer = GPT2Tokenizer.from_pretrained(model_name)
    model = GPT2LMHeadModel.from_pretrained(model_name)
    return model, tokenizer

st.set_page_config(
    page_title="BulgakovLM Example",
    page_icon="πŸ‘¨β€πŸ’»",
)

st.markdown("# πŸ‘¨β€πŸ’» BulgakovLM Example")

txt = st.text_area('Write a prompt here', '''ΠžΠ΄Π½Π°ΠΆΠ΄Ρ‹ ΡƒΡ‚Ρ€ΠΎΠΌ''', height=400)

gen = st.button('Generate')

c = st.code('')  # placeholder element that will hold the generated text

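# Generation hyperparameters exposed as UI controls and passed to generate().
# Note: top_k, top_p and temperature only take effect when sampling is enabled
# (do_sample=True in the call below).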
max_length = st.slider('max_length', 1, 1024, 128)
top_k = st.slider('top_k', 0, 100, 50)
top_p = st.slider('top_p', 0.0, 1.0, 0.9)
temperature = st.slider('temperature', 0.1, 1.0, 1.0)  # must be strictly positive; 0.0 is rejected by generate()
num_beams = st.slider('num_beams', 1, 100, 5)
repetition_penalty = st.slider('repetition_penalty', 1.0, 10.0, 1.0)


if gen:
    c.code('Generating...')
    model, tokenizer = load_model()

    input_ids = tokenizer.encode(txt, return_tensors="pt")
    # do_sample=True is needed for top_k, top_p and temperature to have any effect.
    out = model.generate(
        input_ids,
        do_sample=True,
        max_length=max_length,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        num_beams=num_beams,
        repetition_penalty=repetition_penalty,
    )
    res = tokenizer.decode(out[0], skip_special_tokens=True)
    c.code(res)