File size: 1,280 Bytes
daf919b
 
60c3989
 
 
76e5e1a
60c3989
 
76e5e1a
a9f32fb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20809a7
a9f32fb
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import sys
import subprocess

import gradio as gr

# Install runtime dependencies if they are missing.
# NOTE: `pip.main([...])` is not a supported programmatic API (it was removed
# from pip's public surface in pip 10). The officially recommended way to
# invoke pip from a program is to run it as a subprocess of the current
# interpreter.
subprocess.run(
    [sys.executable, "-m", "pip", "install", "torch", "transformers"],
    check=True,
)

import torch
import transformers


# saved_model
# saved_model
def load_model(model_path):
    """Load a fine-tuned KoBART model plus its tokenizer from a checkpoint.

    Args:
        model_path: Path to a ``torch.save``-d dict containing a "model" key
            holding the fine-tuned state_dict. (The checkpoint also carries a
            "config" key, but it is not needed for inference here.)

    Returns:
        (model, tokenizer): a ``BartForConditionalGeneration`` with the
        fine-tuned weights loaded, and the matching pretrained tokenizer.
    """
    # Always map onto CPU so the app also runs on machines without a GPU.
    saved_data = torch.load(
        model_path,
        map_location="cpu"
    )

    bart_best = saved_data["model"]
    tokenizer = transformers.PreTrainedTokenizerFast.from_pretrained('gogamza/kobart-base-v1')

    ## Load the fine-tuned weights over the base pretrained architecture.
    model = transformers.BartForConditionalGeneration.from_pretrained('gogamza/kobart-base-v1')
    model.load_state_dict(bart_best)

    return model, tokenizer


# main
# main
def inference(prompt):
    """Run the fine-tuned KoBART model on `prompt` and return the decoded text.

    Args:
        prompt: Input text to feed to the model.

    Returns:
        The generated output string, with special tokens stripped.
    """
    model_path = "./kobart-model-logical.pth"

    # Load the checkpoint exactly once and cache it on the function object:
    # the original code re-read the model from disk on EVERY request, which
    # dominates per-call latency.
    if not hasattr(inference, "_cached"):
        inference._cached = load_model(model_path=model_path)
    model, tokenizer = inference._cached

    # Encode and add a batch dimension: shape (1, seq_len).
    input_ids = tokenizer.encode(prompt)
    input_ids = torch.tensor(input_ids)
    input_ids = input_ids.unsqueeze(0)
    output = model.generate(input_ids)
    output = tokenizer.decode(output[0], skip_special_tokens=True)

    return output


# Build the web UI.
# BUG FIX: the original bound `demo` to the RESULT of
# `gr.Interface(...).launch()` (launch artifacts, not the Interface) and then
# called `demo.launch()` a second time, which launches twice / fails on the
# wrong object. Bind the Interface first, then launch exactly once.
demo = gr.Interface(
    fn=inference,
    inputs="text",
    outputs="text",  # the return value of `inference`
)

demo.launch()  # launch(share=True) generates a link accessible from outside